about summary refs log tree commit diff
path: root/src/libstd
diff options
context:
space:
mode:
author: bors <bors@rust-lang.org> 2013-12-26 01:01:54 -0800
committer: bors <bors@rust-lang.org> 2013-12-26 01:01:54 -0800
commit: 9477c49a7b4eec2c2a3e0d9a28c4129e3d1fa6ec (patch)
tree: bd57f2b50c352a4a63d0ae75ef52419e19ebf994 /src/libstd
parent: d975060de6c944ca12ce5205fbc9fc7726948ae1 (diff)
parent: 6cad8f4f14da1dd529100779db74b03d6db20faf (diff)
download: rust-9477c49a7b4eec2c2a3e0d9a28c4129e3d1fa6ec.tar.gz
download: rust-9477c49a7b4eec2c2a3e0d9a28c4129e3d1fa6ec.zip
auto merge of #10965 : alexcrichton/rust/libgreen, r=brson
This pull request extracts all scheduling functionality from libstd, moving it into its own separate crates. The new libnative and libgreen will be the new way in which 1:1 and M:N scheduling is implemented. The standard library still requires an interface to the runtime, however, (think of things like `std::comm` and `io::println`). The interface is now defined by the `Runtime` trait inside of `std::rt`.

The booting process is now that libgreen defines the start lang-item and that's it. I want to extend this soon to have libnative also have a "start lang item" but also allow libgreen and libnative to be linked together in the same process. For now though, only libgreen can be used to start a program (unless you define the start lang item yourself). Again though, I want to change this soon, I just figured that this pull request is large enough as-is.

This certainly wasn't a smooth transition; certain functionality has no equivalent in this new separation, and some functionality is now better enabled through this new system. I did my best to separate all of the commits by topic and keep things fairly bite-sized, although some are indeed larger than others.

As a note, this is currently rebased on top of my `std::comm` rewrite (or at least an old copy of it), but none of those commits need reviewing (that will all happen in another pull request).
Diffstat (limited to 'src/libstd')
-rw-r--r--src/libstd/any.rs30
-rw-r--r--src/libstd/comm/imp.rs337
-rw-r--r--src/libstd/comm/mod.rs509
-rw-r--r--src/libstd/comm/select.rs120
-rw-r--r--src/libstd/io/fs.rs153
-rw-r--r--src/libstd/io/mod.rs8
-rw-r--r--src/libstd/io/native/file.rs965
-rw-r--r--src/libstd/io/native/mod.rs228
-rw-r--r--src/libstd/io/native/process.rs654
-rw-r--r--src/libstd/io/net/addrinfo.rs11
-rw-r--r--src/libstd/io/net/tcp.rs696
-rw-r--r--src/libstd/io/net/udp.rs223
-rw-r--r--src/libstd/io/net/unix.rs122
-rw-r--r--src/libstd/io/option.rs63
-rw-r--r--src/libstd/io/pipe.rs18
-rw-r--r--src/libstd/io/process.rs24
-rw-r--r--src/libstd/io/signal.rs17
-rw-r--r--src/libstd/io/stdio.rs54
-rw-r--r--src/libstd/io/test.rs195
-rw-r--r--src/libstd/io/timer.rs77
-rw-r--r--src/libstd/lib.rs9
-rw-r--r--src/libstd/local_data.rs1
-rw-r--r--src/libstd/logging.rs32
-rw-r--r--src/libstd/os.rs24
-rw-r--r--src/libstd/rt/basic.rs230
-rw-r--r--src/libstd/rt/borrowck.rs11
-rw-r--r--src/libstd/rt/context.rs463
-rw-r--r--src/libstd/rt/crate_map.rs2
-rw-r--r--src/libstd/rt/env.rs2
-rw-r--r--src/libstd/rt/kill.rs318
-rw-r--r--src/libstd/rt/local.rs98
-rw-r--r--src/libstd/rt/local_ptr.rs8
-rw-r--r--src/libstd/rt/mod.rs359
-rw-r--r--src/libstd/rt/rc.rs139
-rw-r--r--src/libstd/rt/rtio.rs71
-rw-r--r--src/libstd/rt/sched.rs1395
-rw-r--r--src/libstd/rt/sleeper_list.rs47
-rw-r--r--src/libstd/rt/stack.rs78
-rw-r--r--src/libstd/rt/task.rs800
-rw-r--r--src/libstd/rt/test.rs440
-rw-r--r--src/libstd/rt/thread.rs34
-rw-r--r--src/libstd/rt/tube.rs170
-rw-r--r--src/libstd/rt/unwind.rs270
-rw-r--r--src/libstd/rt/util.rs27
-rw-r--r--src/libstd/run.rs8
-rw-r--r--src/libstd/sync/arc.rs152
-rw-r--r--src/libstd/sync/atomics.rs (renamed from src/libstd/unstable/atomics.rs)9
-rw-r--r--src/libstd/sync/deque.rs (renamed from src/libstd/rt/deque.rs)13
-rw-r--r--src/libstd/sync/mod.rs23
-rw-r--r--src/libstd/sync/mpmc_bounded_queue.rs (renamed from src/libstd/rt/mpmc_bounded_queue.rs)22
-rw-r--r--src/libstd/sync/mpsc_queue.rs (renamed from src/libstd/rt/mpsc_queue.rs)60
-rw-r--r--src/libstd/sync/spsc_queue.rs (renamed from src/libstd/rt/spsc_queue.rs)49
-rw-r--r--src/libstd/task.rs (renamed from src/libstd/task/mod.rs)273
-rw-r--r--src/libstd/task/spawn.rs233
-rw-r--r--src/libstd/unstable/dynamic_lib.rs9
-rw-r--r--src/libstd/unstable/lang.rs16
-rw-r--r--src/libstd/unstable/mod.rs2
-rw-r--r--src/libstd/unstable/mutex.rs4
-rw-r--r--src/libstd/unstable/stack.rs275
-rw-r--r--src/libstd/unstable/sync.rs484
-rw-r--r--src/libstd/vec.rs1
61 files changed, 2414 insertions, 8751 deletions
diff --git a/src/libstd/any.rs b/src/libstd/any.rs
index 8bce687e245..45a91d01b7a 100644
--- a/src/libstd/any.rs
+++ b/src/libstd/any.rs
@@ -20,10 +20,11 @@
 
 use cast::transmute;
 use option::{Option, Some, None};
+use result::{Result, Ok, Err};
 use to_str::ToStr;
+use unstable::intrinsics::TypeId;
 use unstable::intrinsics;
 use util::Void;
-use unstable::intrinsics::TypeId;
 
 ///////////////////////////////////////////////////////////////////////////////
 // Any trait
@@ -118,13 +119,13 @@ impl<'a> AnyMutRefExt<'a> for &'a mut Any {
 /// Extension methods for a owning `Any` trait object
 pub trait AnyOwnExt {
     /// Returns the boxed value if it is of type `T`, or
-    /// `None` if it isn't.
-    fn move<T: 'static>(self) -> Option<~T>;
+    /// `Err(Self)` if it isn't.
+    fn move<T: 'static>(self) -> Result<~T, Self>;
 }
 
 impl AnyOwnExt for ~Any {
     #[inline]
-    fn move<T: 'static>(self) -> Option<~T> {
+    fn move<T: 'static>(self) -> Result<~T, ~Any> {
         if self.is::<T>() {
             unsafe {
                 // Extract the pointer to the boxed value, temporary alias with self
@@ -133,10 +134,10 @@ impl AnyOwnExt for ~Any {
                 // Prevent destructor on self being run
                 intrinsics::forget(self);
 
-                Some(ptr)
+                Ok(ptr)
             }
         } else {
-            None
+            Err(self)
         }
     }
 }
@@ -155,9 +156,8 @@ impl<'a> ToStr for &'a Any {
 
 #[cfg(test)]
 mod tests {
+    use prelude::*;
     use super::*;
-    use super::AnyRefExt;
-    use option::{Some, None};
 
     #[deriving(Eq)]
     struct Test;
@@ -384,13 +384,19 @@ mod tests {
         let a = ~8u as ~Any;
         let b = ~Test as ~Any;
 
-        assert_eq!(a.move(), Some(~8u));
-        assert_eq!(b.move(), Some(~Test));
+        match a.move::<uint>() {
+            Ok(a) => { assert_eq!(a, ~8u); }
+            Err(..) => fail!()
+        }
+        match b.move::<Test>() {
+            Ok(a) => { assert_eq!(a, ~Test); }
+            Err(..) => fail!()
+        }
 
         let a = ~8u as ~Any;
         let b = ~Test as ~Any;
 
-        assert_eq!(a.move(), None::<~Test>);
-        assert_eq!(b.move(), None::<~uint>);
+        assert!(a.move::<~Test>().is_err());
+        assert!(b.move::<~uint>().is_err());
     }
 }
diff --git a/src/libstd/comm/imp.rs b/src/libstd/comm/imp.rs
deleted file mode 100644
index bd1d6fed901..00000000000
--- a/src/libstd/comm/imp.rs
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! One of the major goals behind this channel implementation is to work
-//! seamlessly on and off the runtime. This also means that the code isn't
-//! littered with "if is_green() { ... } else { ... }". Right now, the rest of
-//! the runtime isn't quite ready to for this abstraction to be done very
-//! nicely, so the conditional "if green" blocks are all contained in this inner
-//! module.
-//!
-//! The goal of this module is to mirror what the runtime "should be", not the
-//! state that it is currently in today. You'll notice that there is no mention
-//! of schedulers or is_green inside any of the channel code, it is currently
-//! entirely contained in this one module.
-//!
-//! In the ideal world, nothing in this module exists and it is all implemented
-//! elsewhere in the runtime (in the proper location). All of this code is
-//! structured in order to easily refactor this to the correct location whenever
-//! we have the trait objects in place to serve as the boundary of the
-//! abstraction.
-
-use iter::{range, Iterator};
-use ops::Drop;
-use option::{Some, None, Option};
-use rt::local::Local;
-use rt::sched::{SchedHandle, Scheduler, TaskFromFriend};
-use rt::thread::Thread;
-use rt;
-use unstable::mutex::Mutex;
-use unstable::sync::UnsafeArc;
-
-// A task handle is a method of waking up a blocked task. The handle itself
-// is completely opaque and only has a wake() method defined on it. This
-// method will wake the method regardless of the context of the thread which
-// is currently calling wake().
-//
-// This abstraction should be able to be created when putting a task to
-// sleep. This should basically be a method on whatever the local Task is,
-// consuming the local Task.
-
-pub struct TaskHandle {
-    priv inner: TaskRepr
-}
-enum TaskRepr {
-    Green(rt::BlockedTask, *mut SchedHandle),
-    Native(NativeWakeupStyle),
-}
-enum NativeWakeupStyle {
-    ArcWakeup(UnsafeArc<Mutex>),    // shared mutex to synchronize on
-    LocalWakeup(*mut Mutex),        // synchronize on the task-local mutex
-}
-
-impl TaskHandle {
-    // Signal that this handle should be woken up. The `can_resched`
-    // argument indicates whether the current task could possibly be
-    // rescheduled or not. This does not have a lot of meaning for the
-    // native case, but for an M:N case it indicates whether a context
-    // switch can happen or not.
-    pub fn wake(self, can_resched: bool) {
-        match self.inner {
-            Green(task, handle) => {
-                // If we have a local scheduler, then use that to run the
-                // blocked task, otherwise we can use the handle to send the
-                // task back to its home.
-                if rt::in_green_task_context() {
-                    if can_resched {
-                        task.wake().map(Scheduler::run_task);
-                    } else {
-                        let mut s: ~Scheduler = Local::take();
-                        s.enqueue_blocked_task(task);
-                        Local::put(s);
-                    }
-                } else {
-                    let task = match task.wake() {
-                        Some(task) => task, None => return
-                    };
-                    // XXX: this is not an easy section of code to refactor.
-                    //      If this handle is owned by the Task (which it
-                    //      should be), then this would be a use-after-free
-                    //      because once the task is pushed onto the message
-                    //      queue, the handle is gone.
-                    //
-                    //      Currently the handle is instead owned by the
-                    //      Port/Chan pair, which means that because a
-                    //      channel is invoking this method the handle will
-                    //      continue to stay alive for the entire duration
-                    //      of this method. This will require thought when
-                    //      moving the handle into the task.
-                    unsafe { (*handle).send(TaskFromFriend(task)) }
-                }
-            }
-
-            // Note that there are no use-after-free races in this code. In
-            // the arc-case, we own the lock, and in the local case, we're
-            // using a lock so it's guranteed that they aren't running while
-            // we hold the lock.
-            Native(ArcWakeup(lock)) => {
-                unsafe {
-                    let lock = lock.get();
-                    (*lock).lock();
-                    (*lock).signal();
-                    (*lock).unlock();
-                }
-            }
-            Native(LocalWakeup(lock)) => {
-                unsafe {
-                    (*lock).lock();
-                    (*lock).signal();
-                    (*lock).unlock();
-                }
-            }
-        }
-    }
-
-    // Trashes handle to this task. This ensures that necessary memory is
-    // deallocated, and there may be some extra assertions as well.
-    pub fn trash(self) {
-        match self.inner {
-            Green(task, _) => task.assert_already_awake(),
-            Native(..) => {}
-        }
-    }
-}
-
-// This structure is an abstraction of what should be stored in the local
-// task itself. This data is currently stored inside of each channel, but
-// this should rather be stored in each task (and channels will still
-// continue to lazily initialize this data).
-
-pub struct TaskData {
-    priv handle: Option<SchedHandle>,
-    priv lock: Mutex,
-}
-
-impl TaskData {
-    pub fn new() -> TaskData {
-        TaskData {
-            handle: None,
-            lock: unsafe { Mutex::empty() },
-        }
-    }
-}
-
-impl Drop for TaskData {
-    fn drop(&mut self) {
-        unsafe { self.lock.destroy() }
-    }
-}
-
-// Now this is the really fun part. This is where all the M:N/1:1-agnostic
-// along with recv/select-agnostic blocking information goes. A "blocking
-// context" is really just a stack-allocated structure (which is probably
-// fine to be a stack-trait-object).
-//
-// This has some particularly strange interfaces, but the reason for all
-// this is to support selection/recv/1:1/M:N all in one bundle.
-
-pub struct BlockingContext<'a> {
-    priv inner: BlockingRepr<'a>
-}
-
-enum BlockingRepr<'a> {
-    GreenBlock(rt::BlockedTask, &'a mut Scheduler),
-    NativeBlock(Option<UnsafeArc<Mutex>>),
-}
-
-impl<'a> BlockingContext<'a> {
-    // Creates one blocking context. The data provided should in theory be
-    // acquired from the local task, but it is instead acquired from the
-    // channel currently.
-    //
-    // This function will call `f` with a blocking context, plus the data
-    // that it is given. This function will then return whether this task
-    // should actually go to sleep or not. If `true` is returned, then this
-    // function does not return until someone calls `wake()` on the task.
-    // If `false` is returned, then this function immediately returns.
-    //
-    // # Safety note
-    //
-    // Note that this stack closure may not be run on the same stack as when
-    // this function was called. This means that the environment of this
-    // stack closure could be unsafely aliased. This is currently prevented
-    // through the guarantee that this function will never return before `f`
-    // finishes executing.
-    pub fn one(data: &mut TaskData,
-               f: |BlockingContext, &mut TaskData| -> bool) {
-        if rt::in_green_task_context() {
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                let ctx = BlockingContext { inner: GreenBlock(task, sched) };
-                // no need to do something on success/failure other than
-                // returning because the `block` function for a BlockingContext
-                // takes care of reawakening itself if the blocking procedure
-                // fails. If this function is successful, then we're already
-                // blocked, and if it fails, the task will already be
-                // rescheduled.
-                f(ctx, data);
-            });
-        } else {
-            unsafe { data.lock.lock(); }
-            let ctx = BlockingContext { inner: NativeBlock(None) };
-            if f(ctx, data) {
-                unsafe { data.lock.wait(); }
-            }
-            unsafe { data.lock.unlock(); }
-        }
-    }
-
-    // Creates many blocking contexts. The intended use case for this
-    // function is selection over a number of ports. This will create `amt`
-    // blocking contexts, yielding them to `f` in turn. If `f` returns
-    // false, then this function aborts and returns immediately. If `f`
-    // repeatedly returns `true` `amt` times, then this function will block.
-    pub fn many(amt: uint, f: |BlockingContext| -> bool) {
-        if rt::in_green_task_context() {
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                for handle in task.make_selectable(amt) {
-                    let ctx = BlockingContext {
-                        inner: GreenBlock(handle, sched)
-                    };
-                    // see comment above in `one` for why no further action is
-                    // necessary here
-                    if !f(ctx) { break }
-                }
-            });
-        } else {
-            // In the native case, our decision to block must be shared
-            // amongst all of the channels. It may be possible to
-            // stack-allocate this mutex (instead of putting it in an
-            // UnsafeArc box), but for now in order to prevent
-            // use-after-free trivially we place this into a box and then
-            // pass that around.
-            unsafe {
-                let mtx = UnsafeArc::new(Mutex::new());
-                (*mtx.get()).lock();
-                let success = range(0, amt).all(|_| {
-                    f(BlockingContext {
-                        inner: NativeBlock(Some(mtx.clone()))
-                    })
-                });
-                if success {
-                    (*mtx.get()).wait();
-                }
-                (*mtx.get()).unlock();
-            }
-        }
-    }
-
-    // This function will consume this BlockingContext, and optionally block
-    // if according to the atomic `decision` function. The semantics of this
-    // functions are:
-    //
-    //  * `slot` is required to be a `None`-slot (which is owned by the
-    //    channel)
-    //  * The `slot` will be filled in with a blocked version of the current
-    //    task (with `wake`-ability if this function is successful).
-    //  * If the `decision` function returns true, then this function
-    //    immediately returns having relinquished ownership of the task.
-    //  * If the `decision` function returns false, then the `slot` is reset
-    //    to `None` and the task is re-scheduled if necessary (remember that
-    //    the task will not resume executing before the outer `one` or
-    //    `many` function has returned. This function is expected to have a
-    //    release memory fence in order for the modifications of `to_wake` to be
-    //    visible to other tasks. Code which attempts to read `to_wake` should
-    //    have an acquiring memory fence to guarantee that this write is
-    //    visible.
-    //
-    // This function will return whether the blocking occurred or not.
-    pub fn block(self,
-                 data: &mut TaskData,
-                 slot: &mut Option<TaskHandle>,
-                 decision: || -> bool) -> bool {
-        assert!(slot.is_none());
-        match self.inner {
-            GreenBlock(task, sched) => {
-                if data.handle.is_none() {
-                    data.handle = Some(sched.make_handle());
-                }
-                let handle = data.handle.get_mut_ref() as *mut SchedHandle;
-                *slot = Some(TaskHandle { inner: Green(task, handle) });
-
-                if !decision() {
-                    match slot.take_unwrap().inner {
-                        Green(task, _) => sched.enqueue_blocked_task(task),
-                        Native(..) => unreachable!()
-                    }
-                    false
-                } else {
-                    true
-                }
-            }
-            NativeBlock(shared) => {
-                *slot = Some(TaskHandle {
-                    inner: Native(match shared {
-                        Some(arc) => ArcWakeup(arc),
-                        None => LocalWakeup(&mut data.lock as *mut Mutex),
-                    })
-                });
-
-                if !decision() {
-                    *slot = None;
-                    false
-                } else {
-                    true
-                }
-            }
-        }
-    }
-}
-
-// Agnostic method of forcing a yield of the current task
-pub fn yield_now() {
-    if rt::in_green_task_context() {
-        let sched: ~Scheduler = Local::take();
-        sched.yield_now();
-    } else {
-        Thread::yield_now();
-    }
-}
-
-// Agnostic method of "maybe yielding" in order to provide fairness
-pub fn maybe_yield() {
-    if rt::in_green_task_context() {
-        let sched: ~Scheduler = Local::take();
-        sched.maybe_yield();
-    } else {
-        // the OS decides fairness, nothing for us to do.
-    }
-}
diff --git a/src/libstd/comm/mod.rs b/src/libstd/comm/mod.rs
index 33d5d48ebdc..21db234122b 100644
--- a/src/libstd/comm/mod.rs
+++ b/src/libstd/comm/mod.rs
@@ -233,14 +233,16 @@ use iter::Iterator;
 use kinds::Send;
 use ops::Drop;
 use option::{Option, Some, None};
+use result::{Ok, Err};
+use rt::local::Local;
+use rt::task::{Task, BlockedTask};
 use rt::thread::Thread;
-use unstable::atomics::{AtomicInt, AtomicBool, SeqCst, Relaxed};
+use sync::atomics::{AtomicInt, AtomicBool, SeqCst, Relaxed};
 use vec::{ImmutableVector, OwnedVector};
 
-use spsc = rt::spsc_queue;
-use mpsc = rt::mpsc_queue;
+use spsc = sync::spsc_queue;
+use mpsc = sync::mpsc_queue;
 
-use self::imp::{TaskHandle, TaskData, BlockingContext};
 pub use self::select::Select;
 
 macro_rules! test (
@@ -248,24 +250,26 @@ macro_rules! test (
         mod $name {
             #[allow(unused_imports)];
 
-            use util;
-            use super::super::*;
+            use native;
             use prelude::*;
+            use super::*;
+            use super::super::*;
+            use task;
+            use util;
 
             fn f() $b
 
             $($a)* #[test] fn uv() { f() }
-            $($a)* #[test]
-            #[ignore(cfg(windows))] // FIXME(#11003)
-            fn native() {
-                use unstable::run_in_bare_thread;
-                run_in_bare_thread(f);
+            $($a)* #[test] fn native() {
+                use native;
+                let (p, c) = Chan::new();
+                do native::task::spawn { c.send(f()) }
+                p.recv();
             }
         }
     )
 )
 
-mod imp;
 mod select;
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -326,9 +330,7 @@ pub struct SharedChan<T> {
 struct Packet {
     cnt: AtomicInt, // How many items are on this channel
     steals: int,    // How many times has a port received without blocking?
-    to_wake: Option<TaskHandle>, // Task to wake up
-
-    data: TaskData,
+    to_wake: Option<BlockedTask>, // Task to wake up
 
     // This lock is used to wake up native threads blocked in select. The
     // `lock` field is not used because the thread blocking in select must
@@ -343,6 +345,7 @@ struct Packet {
     selection_id: uint,
     select_next: *mut Packet,
     select_prev: *mut Packet,
+    recv_cnt: int,
 }
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -358,13 +361,13 @@ impl Packet {
             cnt: AtomicInt::new(0),
             steals: 0,
             to_wake: None,
-            data: TaskData::new(),
             channels: AtomicInt::new(1),
 
             selecting: AtomicBool::new(false),
             selection_id: 0,
             select_next: 0 as *mut Packet,
             select_prev: 0 as *mut Packet,
+            recv_cnt: 0,
         }
     }
 
@@ -418,7 +421,10 @@ impl Packet {
     // This function must have had at least an acquire fence before it to be
     // properly called.
     fn wakeup(&mut self, can_resched: bool) {
-        self.to_wake.take_unwrap().wake(can_resched);
+        match self.to_wake.take_unwrap().wake() {
+            Some(task) => task.reawaken(can_resched),
+            None => {}
+        }
         self.selecting.store(false, Relaxed);
     }
 
@@ -490,7 +496,7 @@ impl Packet {
         match self.channels.fetch_sub(1, SeqCst) {
             1 => {
                 match self.cnt.swap(DISCONNECTED, SeqCst) {
-                    -1 => { self.wakeup(false); }
+                    -1 => { self.wakeup(true); }
                     DISCONNECTED => {}
                     n => { assert!(n >= 0); }
                 }
@@ -531,9 +537,6 @@ impl<T: Send> Chan<T> {
     /// port.
     ///
     /// Rust channels are infinitely buffered so this method will never block.
-    /// This method may trigger a rescheduling, however, in order to wake up a
-    /// blocked receiver (if one is present). If no scheduling is desired, then
-    /// the `send_deferred` guarantees that there will be no reschedulings.
     ///
     /// # Failure
     ///
@@ -555,15 +558,6 @@ impl<T: Send> Chan<T> {
         }
     }
 
-    /// This function is equivalent in the semantics of `send`, but it
-    /// guarantees that a rescheduling will never occur when this method is
-    /// called.
-    pub fn send_deferred(&self, t: T) {
-        if !self.try_send_deferred(t) {
-            fail!("sending on a closed channel");
-        }
-    }
-
     /// Attempts to send a value on this channel, returning whether it was
     /// successfully sent.
     ///
@@ -579,9 +573,8 @@ impl<T: Send> Chan<T> {
     /// be tolerated, then this method should be used instead.
     pub fn try_send(&self, t: T) -> bool { self.try(t, true) }
 
-    /// This function is equivalent in the semantics of `try_send`, but it
-    /// guarantees that a rescheduling will never occur when this method is
-    /// called.
+    /// This function will not stick around for very long. The purpose of this
+    /// function is to guarantee that no rescheduling is performed.
     pub fn try_send_deferred(&self, t: T) -> bool { self.try(t, false) }
 
     fn try(&self, t: T, can_resched: bool) -> bool {
@@ -606,8 +599,9 @@ impl<T: Send> Chan<T> {
                 // the TLS overhead can be a bit much.
                 n => {
                     assert!(n >= 0);
-                    if can_resched && n > 0 && n % RESCHED_FREQ == 0 {
-                        imp::maybe_yield();
+                    if n > 0 && n % RESCHED_FREQ == 0 {
+                        let task: ~Task = Local::take();
+                        task.maybe_yield();
                     }
                     true
                 }
@@ -642,25 +636,9 @@ impl<T: Send> SharedChan<T> {
         }
     }
 
-    /// This function is equivalent in the semantics of `send`, but it
-    /// guarantees that a rescheduling will never occur when this method is
-    /// called.
-    pub fn send_deferred(&self, t: T) {
-        if !self.try_send_deferred(t) {
-            fail!("sending on a closed channel");
-        }
-    }
-
     /// Equivalent method to `try_send` on the `Chan` type (using the same
     /// semantics)
-    pub fn try_send(&self, t: T) -> bool { self.try(t, true) }
-
-    /// This function is equivalent in the semantics of `try_send`, but it
-    /// guarantees that a rescheduling will never occur when this method is
-    /// called.
-    pub fn try_send_deferred(&self, t: T) -> bool { self.try(t, false) }
-
-    fn try(&self, t: T, can_resched: bool) -> bool {
+    pub fn try_send(&self, t: T) -> bool {
         unsafe {
             // Note that the multiple sender case is a little tricker
             // semantically than the single sender case. The logic for
@@ -697,10 +675,11 @@ impl<T: Send> SharedChan<T> {
 
             match (*packet).increment() {
                 DISCONNECTED => {} // oh well, we tried
-                -1 => { (*packet).wakeup(can_resched); }
+                -1 => { (*packet).wakeup(true); }
                 n => {
-                    if can_resched && n > 0 && n % RESCHED_FREQ == 0 {
-                        imp::maybe_yield();
+                    if n > 0 && n % RESCHED_FREQ == 0 {
+                        let task: ~Task = Local::take();
+                        task.maybe_yield();
                     }
                 }
             }
@@ -768,6 +747,18 @@ impl<T: Send> Port<T> {
         // This is a "best effort" situation, so if a queue is inconsistent just
         // don't worry about it.
         let this = unsafe { cast::transmute_mut(self) };
+
+        // See the comment about yielding on sends, but the same applies here.
+        // If a thread is spinning in try_recv we should try
+        unsafe {
+            let packet = this.queue.packet();
+            (*packet).recv_cnt += 1;
+            if (*packet).recv_cnt % RESCHED_FREQ == 0 {
+                let task: ~Task = Local::take();
+                task.maybe_yield();
+            }
+        }
+
         let ret = match this.queue {
             SPSC(ref mut queue) => queue.pop(),
             MPSC(ref mut queue) => match queue.pop() {
@@ -840,15 +831,22 @@ impl<T: Send> Port<T> {
         unsafe {
             this = cast::transmute_mut(self);
             packet = this.queue.packet();
-            BlockingContext::one(&mut (*packet).data, |ctx, data| {
-                ctx.block(data, &mut (*packet).to_wake, || (*packet).decrement())
+            let task: ~Task = Local::take();
+            task.deschedule(1, |task| {
+                assert!((*packet).to_wake.is_none());
+                (*packet).to_wake = Some(task);
+                if (*packet).decrement() {
+                    Ok(())
+                } else {
+                    Err((*packet).to_wake.take_unwrap())
+                }
             });
         }
 
         let data = self.try_recv_inc(false);
         if data.is_none() &&
            unsafe { (*packet).cnt.load(SeqCst) } != DISCONNECTED {
-            fail!("bug: woke up too soon");
+            fail!("bug: woke up too soon {}", unsafe { (*packet).cnt.load(SeqCst) });
         }
         return data;
     }
@@ -880,10 +878,16 @@ impl<T: Send> Drop for Port<T> {
 mod test {
     use prelude::*;
 
-    use task;
-    use rt::thread::Thread;
+    use native;
+    use os;
     use super::*;
-    use rt::test::*;
+
+    pub fn stress_factor() -> uint {
+        match os::getenv("RUST_TEST_STRESS") {
+            Some(val) => from_str::<uint>(val).unwrap(),
+            None => 1,
+        }
+    }
 
     test!(fn smoke() {
         let (p, c) = Chan::new();
@@ -910,99 +914,88 @@ mod test {
         assert_eq!(p.recv(), 1);
     })
 
-    #[test]
-    fn smoke_threads() {
+    test!(fn smoke_threads() {
         let (p, c) = Chan::new();
-        do task::spawn_sched(task::SingleThreaded) {
+        do spawn {
             c.send(1);
         }
         assert_eq!(p.recv(), 1);
-    }
+    })
 
-    #[test] #[should_fail]
-    fn smoke_port_gone() {
+    test!(fn smoke_port_gone() {
         let (p, c) = Chan::new();
         drop(p);
         c.send(1);
-    }
+    } #[should_fail])
 
-    #[test] #[should_fail]
-    fn smoke_shared_port_gone() {
+    test!(fn smoke_shared_port_gone() {
         let (p, c) = SharedChan::new();
         drop(p);
         c.send(1);
-    }
+    } #[should_fail])
 
-    #[test] #[should_fail]
-    fn smoke_shared_port_gone2() {
+    test!(fn smoke_shared_port_gone2() {
         let (p, c) = SharedChan::new();
         drop(p);
         let c2 = c.clone();
         drop(c);
         c2.send(1);
-    }
+    } #[should_fail])
 
-    #[test] #[should_fail]
-    fn port_gone_concurrent() {
+    test!(fn port_gone_concurrent() {
         let (p, c) = Chan::new();
-        do task::spawn_sched(task::SingleThreaded) {
+        do spawn {
             p.recv();
         }
         loop { c.send(1) }
-    }
+    } #[should_fail])
 
-    #[test] #[should_fail]
-    fn port_gone_concurrent_shared() {
+    test!(fn port_gone_concurrent_shared() {
         let (p, c) = SharedChan::new();
         let c1 = c.clone();
-        do task::spawn_sched(task::SingleThreaded) {
+        do spawn {
             p.recv();
         }
         loop {
             c.send(1);
             c1.send(1);
         }
-    }
+    } #[should_fail])
 
-    #[test] #[should_fail]
-    fn smoke_chan_gone() {
+    test!(fn smoke_chan_gone() {
         let (p, c) = Chan::<int>::new();
         drop(c);
         p.recv();
-    }
+    } #[should_fail])
 
-    #[test] #[should_fail]
-    fn smoke_chan_gone_shared() {
+    test!(fn smoke_chan_gone_shared() {
         let (p, c) = SharedChan::<()>::new();
         let c2 = c.clone();
         drop(c);
         drop(c2);
         p.recv();
-    }
+    } #[should_fail])
 
-    #[test] #[should_fail]
-    fn chan_gone_concurrent() {
+    test!(fn chan_gone_concurrent() {
         let (p, c) = Chan::new();
-        do task::spawn_sched(task::SingleThreaded) {
+        do spawn {
             c.send(1);
             c.send(1);
         }
         loop { p.recv(); }
-    }
+    } #[should_fail])
 
-    #[test]
-    fn stress() {
+    test!(fn stress() {
         let (p, c) = Chan::new();
-        do task::spawn_sched(task::SingleThreaded) {
+        do spawn {
             for _ in range(0, 10000) { c.send(1); }
         }
         for _ in range(0, 10000) {
             assert_eq!(p.recv(), 1);
         }
-    }
+    })
 
-    #[test]
-    fn stress_shared() {
+    test!(fn stress_shared() {
         static AMT: uint = 10000;
         static NTHREADS: uint = 8;
         let (p, c) = SharedChan::<int>::new();
@@ -1018,47 +1011,53 @@ mod test {
 
         for _ in range(0, NTHREADS) {
             let c = c.clone();
-            do task::spawn_sched(task::SingleThreaded) {
+            do spawn {
                 for _ in range(0, AMT) { c.send(1); }
             }
         }
         p1.recv();
-
-    }
+    })
 
     #[test]
     #[ignore(cfg(windows))] // FIXME(#11003)
     fn send_from_outside_runtime() {
         let (p, c) = Chan::<int>::new();
         let (p1, c1) = Chan::new();
+        let (port, chan) = SharedChan::new();
+        let chan2 = chan.clone();
         do spawn {
             c1.send(());
             for _ in range(0, 40) {
                 assert_eq!(p.recv(), 1);
             }
+            chan2.send(());
         }
         p1.recv();
-        let t = do Thread::start {
+        do native::task::spawn {
             for _ in range(0, 40) {
                 c.send(1);
             }
-        };
-        t.join();
+            chan.send(());
+        }
+        port.recv();
+        port.recv();
     }
 
     #[test]
     #[ignore(cfg(windows))] // FIXME(#11003)
     fn recv_from_outside_runtime() {
         let (p, c) = Chan::<int>::new();
-        let t = do Thread::start {
+        let (dp, dc) = Chan::new();
+        do native::task::spawn {
             for _ in range(0, 40) {
                 assert_eq!(p.recv(), 1);
             }
+            dc.send(());
         };
         for _ in range(0, 40) {
             c.send(1);
         }
-        t.join();
+        dp.recv();
     }
 
     #[test]
@@ -1066,173 +1065,132 @@ mod test {
     fn no_runtime() {
         let (p1, c1) = Chan::<int>::new();
         let (p2, c2) = Chan::<int>::new();
-        let t1 = do Thread::start {
+        let (port, chan) = SharedChan::new();
+        let chan2 = chan.clone();
+        do native::task::spawn {
             assert_eq!(p1.recv(), 1);
             c2.send(2);
-        };
-        let t2 = do Thread::start {
+            chan2.send(());
+        }
+        do native::task::spawn {
             c1.send(1);
             assert_eq!(p2.recv(), 2);
-        };
-        t1.join();
-        t2.join();
+            chan.send(());
+        }
+        port.recv();
+        port.recv();
     }
 
-    #[test]
-    fn oneshot_single_thread_close_port_first() {
+    test!(fn oneshot_single_thread_close_port_first() {
         // Simple test of closing without sending
-        do run_in_newsched_task {
-            let (port, _chan) = Chan::<int>::new();
-            { let _p = port; }
-        }
-    }
+        let (port, _chan) = Chan::<int>::new();
+        { let _p = port; }
+    })
 
-    #[test]
-    fn oneshot_single_thread_close_chan_first() {
+    test!(fn oneshot_single_thread_close_chan_first() {
         // Simple test of closing without sending
-        do run_in_newsched_task {
-            let (_port, chan) = Chan::<int>::new();
-            { let _c = chan; }
-        }
-    }
+        let (_port, chan) = Chan::<int>::new();
+        { let _c = chan; }
+    })
 
-    #[test] #[should_fail]
-    fn oneshot_single_thread_send_port_close() {
+    test!(fn oneshot_single_thread_send_port_close() {
         // Testing that the sender cleans up the payload if receiver is closed
         let (port, chan) = Chan::<~int>::new();
         { let _p = port; }
         chan.send(~0);
-    }
+    } #[should_fail])
 
-    #[test]
-    fn oneshot_single_thread_recv_chan_close() {
+    test!(fn oneshot_single_thread_recv_chan_close() {
         // Receiving on a closed chan will fail
-        do run_in_newsched_task {
-            let res = do spawntask_try {
-                let (port, chan) = Chan::<~int>::new();
-                { let _c = chan; }
-                port.recv();
-            };
-            // What is our res?
-            assert!(res.is_err());
-        }
-    }
-
-    #[test]
-    fn oneshot_single_thread_send_then_recv() {
-        do run_in_newsched_task {
+        let res = do task::try {
             let (port, chan) = Chan::<~int>::new();
-            chan.send(~10);
-            assert!(port.recv() == ~10);
-        }
-    }
+            { let _c = chan; }
+            port.recv();
+        };
+        // What is our res?
+        assert!(res.is_err());
+    })
 
-    #[test]
-    fn oneshot_single_thread_try_send_open() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<int>::new();
-            assert!(chan.try_send(10));
-            assert!(port.recv() == 10);
-        }
-    }
+    test!(fn oneshot_single_thread_send_then_recv() {
+        let (port, chan) = Chan::<~int>::new();
+        chan.send(~10);
+        assert!(port.recv() == ~10);
+    })
 
-    #[test]
-    fn oneshot_single_thread_try_send_closed() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<int>::new();
-            { let _p = port; }
-            assert!(!chan.try_send(10));
-        }
-    }
+    test!(fn oneshot_single_thread_try_send_open() {
+        let (port, chan) = Chan::<int>::new();
+        assert!(chan.try_send(10));
+        assert!(port.recv() == 10);
+    })
 
-    #[test]
-    fn oneshot_single_thread_try_recv_open() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<int>::new();
-            chan.send(10);
-            assert!(port.try_recv() == Some(10));
-        }
-    }
+    test!(fn oneshot_single_thread_try_send_closed() {
+        let (port, chan) = Chan::<int>::new();
+        { let _p = port; }
+        assert!(!chan.try_send(10));
+    })
 
-    #[test]
-    fn oneshot_single_thread_try_recv_closed() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<int>::new();
-            { let _c = chan; }
-            assert!(port.recv_opt() == None);
-        }
-    }
+    test!(fn oneshot_single_thread_try_recv_open() {
+        let (port, chan) = Chan::<int>::new();
+        chan.send(10);
+        assert!(port.try_recv() == Some(10));
+    })
 
-    #[test]
-    fn oneshot_single_thread_peek_data() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<int>::new();
-            assert!(port.try_recv().is_none());
-            chan.send(10);
-            assert!(port.try_recv().is_some());
-        }
-    }
+    test!(fn oneshot_single_thread_try_recv_closed() {
+        let (port, chan) = Chan::<int>::new();
+        { let _c = chan; }
+        assert!(port.recv_opt() == None);
+    })
 
-    #[test]
-    fn oneshot_single_thread_peek_close() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<int>::new();
-            { let _c = chan; }
-            assert!(port.try_recv().is_none());
-            assert!(port.try_recv().is_none());
-        }
-    }
+    test!(fn oneshot_single_thread_peek_data() {
+        let (port, chan) = Chan::<int>::new();
+        assert!(port.try_recv().is_none());
+        chan.send(10);
+        assert!(port.try_recv().is_some());
+    })
 
-    #[test]
-    fn oneshot_single_thread_peek_open() {
-        do run_in_newsched_task {
-            let (port, _) = Chan::<int>::new();
-            assert!(port.try_recv().is_none());
-        }
-    }
+    test!(fn oneshot_single_thread_peek_close() {
+        let (port, chan) = Chan::<int>::new();
+        { let _c = chan; }
+        assert!(port.try_recv().is_none());
+        assert!(port.try_recv().is_none());
+    })
 
-    #[test]
-    fn oneshot_multi_task_recv_then_send() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<~int>::new();
-            do spawntask {
-                assert!(port.recv() == ~10);
-            }
+    test!(fn oneshot_single_thread_peek_open() {
+        let (port, _) = Chan::<int>::new();
+        assert!(port.try_recv().is_none());
+    })
 
-            chan.send(~10);
+    test!(fn oneshot_multi_task_recv_then_send() {
+        let (port, chan) = Chan::<~int>::new();
+        do spawn {
+            assert!(port.recv() == ~10);
         }
-    }
 
-    #[test]
-    fn oneshot_multi_task_recv_then_close() {
-        do run_in_newsched_task {
-            let (port, chan) = Chan::<~int>::new();
-            do spawntask_later {
-                let _chan = chan;
-            }
-            let res = do spawntask_try {
-                assert!(port.recv() == ~10);
-            };
-            assert!(res.is_err());
+        chan.send(~10);
+    })
+
+    test!(fn oneshot_multi_task_recv_then_close() {
+        let (port, chan) = Chan::<~int>::new();
+        do spawn {
+            let _chan = chan;
         }
-    }
+        let res = do task::try {
+            assert!(port.recv() == ~10);
+        };
+        assert!(res.is_err());
+    })
 
-    #[test]
-    fn oneshot_multi_thread_close_stress() {
+    test!(fn oneshot_multi_thread_close_stress() {
         stress_factor().times(|| {
-            do run_in_newsched_task {
-                let (port, chan) = Chan::<int>::new();
-                let thread = do spawntask_thread {
-                    let _p = port;
-                };
-                let _chan = chan;
-                thread.join();
+            let (port, chan) = Chan::<int>::new();
+            do spawn {
+                let _p = port;
             }
+            let _chan = chan;
         })
-    }
+    })
 
-    #[test]
-    fn oneshot_multi_thread_send_close_stress() {
+    test!(fn oneshot_multi_thread_send_close_stress() {
         stress_factor().times(|| {
             let (port, chan) = Chan::<int>::new();
             do spawn {
@@ -1242,10 +1200,9 @@ mod test {
                 chan.send(1);
             };
         })
-    }
+    })
 
-    #[test]
-    fn oneshot_multi_thread_recv_close_stress() {
+    test!(fn oneshot_multi_thread_recv_close_stress() {
         stress_factor().times(|| {
             let (port, chan) = Chan::<int>::new();
             do spawn {
@@ -1262,10 +1219,9 @@ mod test {
                 }
             };
         })
-    }
+    })
 
-    #[test]
-    fn oneshot_multi_thread_send_recv_stress() {
+    test!(fn oneshot_multi_thread_send_recv_stress() {
         stress_factor().times(|| {
             let (port, chan) = Chan::<~int>::new();
             do spawn {
@@ -1275,10 +1231,9 @@ mod test {
                 assert!(port.recv() == ~10);
             }
         })
-    }
+    })
 
-    #[test]
-    fn stream_send_recv_stress() {
+    test!(fn stream_send_recv_stress() {
         stress_factor().times(|| {
             let (port, chan) = Chan::<~int>::new();
 
@@ -1288,7 +1243,7 @@ mod test {
             fn send(chan: Chan<~int>, i: int) {
                 if i == 10 { return }
 
-                do spawntask_random {
+                do spawn {
                     chan.send(~i);
                     send(chan, i + 1);
                 }
@@ -1297,44 +1252,37 @@ mod test {
             fn recv(port: Port<~int>, i: int) {
                 if i == 10 { return }
 
-                do spawntask_random {
+                do spawn {
                     assert!(port.recv() == ~i);
                     recv(port, i + 1);
                 };
             }
         })
-    }
+    })
 
-    #[test]
-    fn recv_a_lot() {
+    test!(fn recv_a_lot() {
         // Regression test that we don't run out of stack in scheduler context
-        do run_in_newsched_task {
-            let (port, chan) = Chan::new();
-            10000.times(|| { chan.send(()) });
-            10000.times(|| { port.recv() });
-        }
-    }
+        let (port, chan) = Chan::new();
+        10000.times(|| { chan.send(()) });
+        10000.times(|| { port.recv() });
+    })
 
-    #[test]
-    fn shared_chan_stress() {
-        do run_in_mt_newsched_task {
-            let (port, chan) = SharedChan::new();
-            let total = stress_factor() + 100;
-            total.times(|| {
-                let chan_clone = chan.clone();
-                do spawntask_random {
-                    chan_clone.send(());
-                }
-            });
+    test!(fn shared_chan_stress() {
+        let (port, chan) = SharedChan::new();
+        let total = stress_factor() + 100;
+        total.times(|| {
+            let chan_clone = chan.clone();
+            do spawn {
+                chan_clone.send(());
+            }
+        });
 
-            total.times(|| {
-                port.recv();
-            });
-        }
-    }
+        total.times(|| {
+            port.recv();
+        });
+    })
 
-    #[test]
-    fn test_nested_recv_iter() {
+    test!(fn test_nested_recv_iter() {
         let (port, chan) = Chan::<int>::new();
         let (total_port, total_chan) = Chan::<int>::new();
 
@@ -1351,10 +1299,9 @@ mod test {
         chan.send(2);
         drop(chan);
         assert_eq!(total_port.recv(), 6);
-    }
+    })
 
-    #[test]
-    fn test_recv_iter_break() {
+    test!(fn test_recv_iter_break() {
         let (port, chan) = Chan::<int>::new();
         let (count_port, count_chan) = Chan::<int>::new();
 
@@ -1376,5 +1323,5 @@ mod test {
         chan.try_send(2);
         drop(chan);
         assert_eq!(count_port.recv(), 4);
-    }
+    })
 }
diff --git a/src/libstd/comm/select.rs b/src/libstd/comm/select.rs
index bbd4cfea78d..302c9d9ea46 100644
--- a/src/libstd/comm/select.rs
+++ b/src/libstd/comm/select.rs
@@ -50,10 +50,13 @@ use kinds::Send;
 use ops::Drop;
 use option::{Some, None, Option};
 use ptr::RawPtr;
-use super::imp::BlockingContext;
-use super::{Packet, Port, imp};
+use result::{Ok, Err};
+use rt::local::Local;
+use rt::task::Task;
+use super::{Packet, Port};
+use sync::atomics::{Relaxed, SeqCst};
+use task;
 use uint;
-use unstable::atomics::{Relaxed, SeqCst};
 
 macro_rules! select {
     (
@@ -184,19 +187,22 @@ impl Select {
             // Acquire a number of blocking contexts, and block on each one
             // sequentially until one fails. If one fails, then abort
             // immediately so we can go unblock on all the other ports.
-            BlockingContext::many(amt, |ctx| {
+            let task: ~Task = Local::take();
+            task.deschedule(amt, |task| {
+                // Prepare for the block
                 let (i, packet) = iter.next().unwrap();
+                assert!((*packet).to_wake.is_none());
+                (*packet).to_wake = Some(task);
                 (*packet).selecting.store(true, SeqCst);
-                if !ctx.block(&mut (*packet).data,
-                              &mut (*packet).to_wake,
-                              || (*packet).decrement()) {
+
+                if (*packet).decrement() {
+                    Ok(())
+                } else {
                     (*packet).abort_selection(false);
                     (*packet).selecting.store(false, SeqCst);
                     ready_index = i;
                     ready_id = (*packet).selection_id;
-                    false
-                } else {
-                    true
+                    Err((*packet).to_wake.take_unwrap())
                 }
             });
 
@@ -225,7 +231,7 @@ impl Select {
                 if (*packet).abort_selection(true) {
                     ready_id = (*packet).selection_id;
                     while (*packet).selecting.load(Relaxed) {
-                        imp::yield_now();
+                        task::deschedule();
                     }
                 }
             }
@@ -304,6 +310,7 @@ impl Iterator<*mut Packet> for PacketIterator {
 }
 
 #[cfg(test)]
+#[allow(unused_imports)]
 mod test {
     use super::super::*;
     use prelude::*;
@@ -359,19 +366,16 @@ mod test {
         )
     })
 
-    #[test]
-    fn unblocks() {
-        use std::io::timer;
-
+    test!(fn unblocks() {
         let (mut p1, c1) = Chan::<int>::new();
         let (mut p2, _c2) = Chan::<int>::new();
         let (p3, c3) = Chan::<int>::new();
 
         do spawn {
-            timer::sleep(3);
+            20.times(task::deschedule);
             c1.send(1);
             p3.recv();
-            timer::sleep(3);
+            20.times(task::deschedule);
         }
 
         select! (
@@ -383,18 +387,15 @@ mod test {
             a = p1.recv_opt() => { assert_eq!(a, None); },
             _b = p2.recv() => { fail!() }
         )
-    }
-
-    #[test]
-    fn both_ready() {
-        use std::io::timer;
+    })
 
+    test!(fn both_ready() {
         let (mut p1, c1) = Chan::<int>::new();
         let (mut p2, c2) = Chan::<int>::new();
         let (p3, c3) = Chan::<()>::new();
 
         do spawn {
-            timer::sleep(3);
+            20.times(task::deschedule);
             c1.send(1);
             c2.send(2);
             p3.recv();
@@ -408,11 +409,12 @@ mod test {
             a = p1.recv() => { assert_eq!(a, 1); },
             a = p2.recv() => { assert_eq!(a, 2); }
         )
+        assert_eq!(p1.try_recv(), None);
+        assert_eq!(p2.try_recv(), None);
         c3.send(());
-    }
+    })
 
-    #[test]
-    fn stress() {
+    test!(fn stress() {
         static AMT: int = 10000;
         let (mut p1, c1) = Chan::<int>::new();
         let (mut p2, c2) = Chan::<int>::new();
@@ -436,69 +438,5 @@ mod test {
             )
             c3.send(());
         }
-    }
-
-    #[test]
-    #[ignore(cfg(windows))] // FIXME(#11003)
-    fn stress_native() {
-        use std::rt::thread::Thread;
-        use std::unstable::run_in_bare_thread;
-        static AMT: int = 10000;
-
-        do run_in_bare_thread {
-            let (mut p1, c1) = Chan::<int>::new();
-            let (mut p2, c2) = Chan::<int>::new();
-            let (p3, c3) = Chan::<()>::new();
-
-            let t = do Thread::start {
-                for i in range(0, AMT) {
-                    if i % 2 == 0 {
-                        c1.send(i);
-                    } else {
-                        c2.send(i);
-                    }
-                    p3.recv();
-                }
-            };
-
-            for i in range(0, AMT) {
-                select! (
-                    i1 = p1.recv() => { assert!(i % 2 == 0 && i == i1); },
-                    i2 = p2.recv() => { assert!(i % 2 == 1 && i == i2); }
-                )
-                c3.send(());
-            }
-            t.join();
-        }
-    }
-
-    #[test]
-    #[ignore(cfg(windows))] // FIXME(#11003)
-    fn native_both_ready() {
-        use std::rt::thread::Thread;
-        use std::unstable::run_in_bare_thread;
-
-        do run_in_bare_thread {
-            let (mut p1, c1) = Chan::<int>::new();
-            let (mut p2, c2) = Chan::<int>::new();
-            let (p3, c3) = Chan::<()>::new();
-
-            let t = do Thread::start {
-                c1.send(1);
-                c2.send(2);
-                p3.recv();
-            };
-
-            select! (
-                a = p1.recv() => { assert_eq!(a, 1); },
-                b = p2.recv() => { assert_eq!(b, 2); }
-            )
-            select! (
-                a = p1.recv() => { assert_eq!(a, 1); },
-                b = p2.recv() => { assert_eq!(b, 2); }
-            )
-            c3.send(());
-            t.join();
-        }
-    }
+    })
 }
diff --git a/src/libstd/io/fs.rs b/src/libstd/io/fs.rs
index a1465ca7b33..b4838d534dc 100644
--- a/src/libstd/io/fs.rs
+++ b/src/libstd/io/fs.rs
@@ -54,7 +54,7 @@ use super::{SeekStyle, Read, Write, Open, IoError, Truncate,
 use rt::rtio::{RtioFileStream, IoFactory, LocalIo};
 use io;
 use option::{Some, None, Option};
-use result::{Ok, Err, Result};
+use result::{Ok, Err};
 use path;
 use path::{Path, GenericPath};
 use vec::{OwnedVector, ImmutableVector};
@@ -75,17 +75,6 @@ pub struct File {
     priv last_nread: int,
 }
 
-fn io_raise<T>(f: |io: &mut IoFactory| -> Result<T, IoError>) -> Option<T> {
-    let mut io = LocalIo::borrow();
-    match f(io.get()) {
-        Ok(t) => Some(t),
-        Err(ioerr) => {
-            io_error::cond.raise(ioerr);
-            None
-        }
-    }
-}
-
 impl File {
     /// Open a file at `path` in the mode specified by the `mode` and `access`
     /// arguments
@@ -131,18 +120,15 @@ impl File {
     pub fn open_mode(path: &Path,
                      mode: FileMode,
                      access: FileAccess) -> Option<File> {
-        let mut io = LocalIo::borrow();
-        match io.get().fs_open(&path.to_c_str(), mode, access) {
-            Ok(fd) => Some(File {
-                path: path.clone(),
-                fd: fd,
-                last_nread: -1
-            }),
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        LocalIo::maybe_raise(|io| {
+            io.fs_open(&path.to_c_str(), mode, access).map(|fd| {
+                File {
+                    path: path.clone(),
+                    fd: fd,
+                    last_nread: -1
+                }
+            })
+        })
     }
 
     /// Attempts to open a file in read-only mode. This function is equivalent to
@@ -242,7 +228,7 @@ impl File {
 /// directory, the user lacks permissions to remove the file, or if some
 /// other filesystem-level error occurs.
 pub fn unlink(path: &Path) {
-    io_raise(|io| io.fs_unlink(&path.to_c_str()));
+    LocalIo::maybe_raise(|io| io.fs_unlink(&path.to_c_str()));
 }
 
 /// Given a path, query the file system to get information about a file,
@@ -270,7 +256,9 @@ pub fn unlink(path: &Path) {
 /// requisite permissions to perform a `stat` call on the given path or if
 /// there is no entry in the filesystem at the provided path.
 pub fn stat(path: &Path) -> FileStat {
-    io_raise(|io| io.fs_stat(&path.to_c_str())).unwrap_or_else(dummystat)
+    LocalIo::maybe_raise(|io| {
+        io.fs_stat(&path.to_c_str())
+    }).unwrap_or_else(dummystat)
 }
 
 fn dummystat() -> FileStat {
@@ -306,7 +294,9 @@ fn dummystat() -> FileStat {
 ///
 /// See `stat`
 pub fn lstat(path: &Path) -> FileStat {
-    io_raise(|io| io.fs_lstat(&path.to_c_str())).unwrap_or_else(dummystat)
+    LocalIo::maybe_raise(|io| {
+        io.fs_lstat(&path.to_c_str())
+    }).unwrap_or_else(dummystat)
 }
 
 /// Rename a file or directory to a new name.
@@ -324,7 +314,7 @@ pub fn lstat(path: &Path) -> FileStat {
 /// the process lacks permissions to view the contents, or if some other
 /// intermittent I/O error occurs.
 pub fn rename(from: &Path, to: &Path) {
-    io_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str()));
+    LocalIo::maybe_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str()));
 }
 
 /// Copies the contents of one file to another. This function will also
@@ -395,7 +385,7 @@ pub fn copy(from: &Path, to: &Path) {
 /// condition. Some possible error situations are not having the permission to
 /// change the attributes of a file or the file not existing.
 pub fn chmod(path: &Path, mode: io::FilePermission) {
-    io_raise(|io| io.fs_chmod(&path.to_c_str(), mode));
+    LocalIo::maybe_raise(|io| io.fs_chmod(&path.to_c_str(), mode));
 }
 
 /// Change the user and group owners of a file at the specified path.
@@ -404,7 +394,7 @@ pub fn chmod(path: &Path, mode: io::FilePermission) {
 ///
 /// This function will raise on the `io_error` condition on failure.
 pub fn chown(path: &Path, uid: int, gid: int) {
-    io_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid));
+    LocalIo::maybe_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid));
 }
 
 /// Creates a new hard link on the filesystem. The `dst` path will be a
@@ -415,7 +405,7 @@ pub fn chown(path: &Path, uid: int, gid: int) {
 ///
 /// This function will raise on the `io_error` condition on failure.
 pub fn link(src: &Path, dst: &Path) {
-    io_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str()));
+    LocalIo::maybe_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str()));
 }
 
 /// Creates a new symbolic link on the filesystem. The `dst` path will be a
@@ -425,7 +415,7 @@ pub fn link(src: &Path, dst: &Path) {
 ///
 /// This function will raise on the `io_error` condition on failure.
 pub fn symlink(src: &Path, dst: &Path) {
-    io_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str()));
+    LocalIo::maybe_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str()));
 }
 
 /// Reads a symlink, returning the file that the symlink points to.
@@ -436,7 +426,7 @@ pub fn symlink(src: &Path, dst: &Path) {
 /// conditions include reading a file that does not exist or reading a file
 /// which is not a symlink.
 pub fn readlink(path: &Path) -> Option<Path> {
-    io_raise(|io| io.fs_readlink(&path.to_c_str()))
+    LocalIo::maybe_raise(|io| io.fs_readlink(&path.to_c_str()))
 }
 
 /// Create a new, empty directory at the provided path
@@ -456,7 +446,7 @@ pub fn readlink(path: &Path) -> Option<Path> {
 /// to make a new directory at the provided path, or if the directory already
 /// exists.
 pub fn mkdir(path: &Path, mode: FilePermission) {
-    io_raise(|io| io.fs_mkdir(&path.to_c_str(), mode));
+    LocalIo::maybe_raise(|io| io.fs_mkdir(&path.to_c_str(), mode));
 }
 
 /// Remove an existing, empty directory
@@ -475,7 +465,7 @@ pub fn mkdir(path: &Path, mode: FilePermission) {
 /// to remove the directory at the provided path, or if the directory isn't
 /// empty.
 pub fn rmdir(path: &Path) {
-    io_raise(|io| io.fs_rmdir(&path.to_c_str()));
+    LocalIo::maybe_raise(|io| io.fs_rmdir(&path.to_c_str()));
 }
 
 /// Retrieve a vector containing all entries within a provided directory
@@ -502,7 +492,9 @@ pub fn rmdir(path: &Path) {
 /// the process lacks permissions to view the contents or if the `path` points
 /// at a non-directory file
 pub fn readdir(path: &Path) -> ~[Path] {
-    io_raise(|io| io.fs_readdir(&path.to_c_str(), 0)).unwrap_or_else(|| ~[])
+    LocalIo::maybe_raise(|io| {
+        io.fs_readdir(&path.to_c_str(), 0)
+    }).unwrap_or_else(|| ~[])
 }
 
 /// Returns an iterator which will recursively walk the directory structure
@@ -583,7 +575,7 @@ pub fn rmdir_recursive(path: &Path) {
 /// happens.
 // FIXME(#10301) these arguments should not be u64
 pub fn change_file_times(path: &Path, atime: u64, mtime: u64) {
-    io_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime));
+    LocalIo::maybe_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime));
 }
 
 impl Reader for File {
@@ -722,7 +714,7 @@ mod test {
         }
     }
 
-    fn tmpdir() -> TempDir {
+    pub fn tmpdir() -> TempDir {
         use os;
         use rand;
         let ret = os::tmpdir().join(format!("rust-{}", rand::random::<u32>()));
@@ -730,32 +722,7 @@ mod test {
         TempDir(ret)
     }
 
-    macro_rules! test (
-        { fn $name:ident() $b:block } => (
-            mod $name {
-                use prelude::*;
-                use io::{SeekSet, SeekCur, SeekEnd, io_error, Read, Open,
-                         ReadWrite};
-                use io;
-                use str;
-                use io::fs::{File, rmdir, mkdir, readdir, rmdir_recursive,
-                             mkdir_recursive, copy, unlink, stat, symlink, link,
-                             readlink, chmod, lstat, change_file_times};
-                use io::fs::test::tmpdir;
-                use util;
-
-                fn f() $b
-
-                #[test] fn uv() { f() }
-                #[test] fn native() {
-                    use rt::test::run_in_newsched_task;
-                    run_in_newsched_task(f);
-                }
-            }
-        )
-    )
-
-    test!(fn file_test_io_smoke_test() {
+    iotest!(fn file_test_io_smoke_test() {
         let message = "it's alright. have a good time";
         let tmpdir = tmpdir();
         let filename = &tmpdir.join("file_rt_io_file_test.txt");
@@ -775,7 +742,7 @@ mod test {
         unlink(filename);
     })
 
-    test!(fn invalid_path_raises() {
+    iotest!(fn invalid_path_raises() {
         let tmpdir = tmpdir();
         let filename = &tmpdir.join("file_that_does_not_exist.txt");
         let mut called = false;
@@ -788,7 +755,7 @@ mod test {
         assert!(called);
     })
 
-    test!(fn file_test_iounlinking_invalid_path_should_raise_condition() {
+    iotest!(fn file_test_iounlinking_invalid_path_should_raise_condition() {
         let tmpdir = tmpdir();
         let filename = &tmpdir.join("file_another_file_that_does_not_exist.txt");
         let mut called = false;
@@ -798,7 +765,7 @@ mod test {
         assert!(called);
     })
 
-    test!(fn file_test_io_non_positional_read() {
+    iotest!(fn file_test_io_non_positional_read() {
         let message: &str = "ten-four";
         let mut read_mem = [0, .. 8];
         let tmpdir = tmpdir();
@@ -823,7 +790,7 @@ mod test {
         assert_eq!(read_str, message);
     })
 
-    test!(fn file_test_io_seek_and_tell_smoke_test() {
+    iotest!(fn file_test_io_seek_and_tell_smoke_test() {
         let message = "ten-four";
         let mut read_mem = [0, .. 4];
         let set_cursor = 4 as u64;
@@ -849,7 +816,7 @@ mod test {
         assert_eq!(tell_pos_post_read, message.len() as u64);
     })
 
-    test!(fn file_test_io_seek_and_write() {
+    iotest!(fn file_test_io_seek_and_write() {
         let initial_msg =   "food-is-yummy";
         let overwrite_msg =    "-the-bar!!";
         let final_msg =     "foo-the-bar!!";
@@ -872,7 +839,7 @@ mod test {
         assert!(read_str == final_msg.to_owned());
     })
 
-    test!(fn file_test_io_seek_shakedown() {
+    iotest!(fn file_test_io_seek_shakedown() {
         use std::str;          // 01234567890123
         let initial_msg =   "qwer-asdf-zxcv";
         let chunk_one: &str = "qwer";
@@ -903,7 +870,7 @@ mod test {
         unlink(filename);
     })
 
-    test!(fn file_test_stat_is_correct_on_is_file() {
+    iotest!(fn file_test_stat_is_correct_on_is_file() {
         let tmpdir = tmpdir();
         let filename = &tmpdir.join("file_stat_correct_on_is_file.txt");
         {
@@ -916,7 +883,7 @@ mod test {
         unlink(filename);
     })
 
-    test!(fn file_test_stat_is_correct_on_is_dir() {
+    iotest!(fn file_test_stat_is_correct_on_is_dir() {
         let tmpdir = tmpdir();
         let filename = &tmpdir.join("file_stat_correct_on_is_dir");
         mkdir(filename, io::UserRWX);
@@ -925,7 +892,7 @@ mod test {
         rmdir(filename);
     })
 
-    test!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() {
+    iotest!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() {
         let tmpdir = tmpdir();
         let dir = &tmpdir.join("fileinfo_false_on_dir");
         mkdir(dir, io::UserRWX);
@@ -933,7 +900,7 @@ mod test {
         rmdir(dir);
     })
 
-    test!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() {
+    iotest!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() {
         let tmpdir = tmpdir();
         let file = &tmpdir.join("fileinfo_check_exists_b_and_a.txt");
         File::create(file).write(bytes!("foo"));
@@ -942,7 +909,7 @@ mod test {
         assert!(!file.exists());
     })
 
-    test!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() {
+    iotest!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() {
         let tmpdir = tmpdir();
         let dir = &tmpdir.join("before_and_after_dir");
         assert!(!dir.exists());
@@ -953,7 +920,7 @@ mod test {
         assert!(!dir.exists());
     })
 
-    test!(fn file_test_directoryinfo_readdir() {
+    iotest!(fn file_test_directoryinfo_readdir() {
         use std::str;
         let tmpdir = tmpdir();
         let dir = &tmpdir.join("di_readdir");
@@ -984,11 +951,11 @@ mod test {
         rmdir(dir);
     })
 
-    test!(fn recursive_mkdir_slash() {
+    iotest!(fn recursive_mkdir_slash() {
         mkdir_recursive(&Path::new("/"), io::UserRWX);
     })
 
-    test!(fn unicode_path_is_dir() {
+    iotest!(fn unicode_path_is_dir() {
         assert!(Path::new(".").is_dir());
         assert!(!Path::new("test/stdtest/fs.rs").is_dir());
 
@@ -1006,7 +973,7 @@ mod test {
         assert!(filepath.exists());
     })
 
-    test!(fn unicode_path_exists() {
+    iotest!(fn unicode_path_exists() {
         assert!(Path::new(".").exists());
         assert!(!Path::new("test/nonexistent-bogus-path").exists());
 
@@ -1018,7 +985,7 @@ mod test {
         assert!(!Path::new("test/unicode-bogus-path-각丁ー再见").exists());
     })
 
-    test!(fn copy_file_does_not_exist() {
+    iotest!(fn copy_file_does_not_exist() {
         let from = Path::new("test/nonexistent-bogus-path");
         let to = Path::new("test/other-bogus-path");
         match io::result(|| copy(&from, &to)) {
@@ -1030,7 +997,7 @@ mod test {
         }
     })
 
-    test!(fn copy_file_ok() {
+    iotest!(fn copy_file_ok() {
         let tmpdir = tmpdir();
         let input = tmpdir.join("in.txt");
         let out = tmpdir.join("out.txt");
@@ -1043,7 +1010,7 @@ mod test {
         assert_eq!(input.stat().perm, out.stat().perm);
     })
 
-    test!(fn copy_file_dst_dir() {
+    iotest!(fn copy_file_dst_dir() {
         let tmpdir = tmpdir();
         let out = tmpdir.join("out");
 
@@ -1053,7 +1020,7 @@ mod test {
         }
     })
 
-    test!(fn copy_file_dst_exists() {
+    iotest!(fn copy_file_dst_exists() {
         let tmpdir = tmpdir();
         let input = tmpdir.join("in");
         let output = tmpdir.join("out");
@@ -1066,7 +1033,7 @@ mod test {
                    (bytes!("foo")).to_owned());
     })
 
-    test!(fn copy_file_src_dir() {
+    iotest!(fn copy_file_src_dir() {
         let tmpdir = tmpdir();
         let out = tmpdir.join("out");
 
@@ -1076,7 +1043,7 @@ mod test {
         assert!(!out.exists());
     })
 
-    test!(fn copy_file_preserves_perm_bits() {
+    iotest!(fn copy_file_preserves_perm_bits() {
         let tmpdir = tmpdir();
         let input = tmpdir.join("in.txt");
         let out = tmpdir.join("out.txt");
@@ -1091,7 +1058,7 @@ mod test {
     })
 
     #[cfg(not(windows))] // FIXME(#10264) operation not permitted?
-    test!(fn symlinks_work() {
+    iotest!(fn symlinks_work() {
         let tmpdir = tmpdir();
         let input = tmpdir.join("in.txt");
         let out = tmpdir.join("out.txt");
@@ -1106,14 +1073,14 @@ mod test {
     })
 
     #[cfg(not(windows))] // apparently windows doesn't like symlinks
-    test!(fn symlink_noexist() {
+    iotest!(fn symlink_noexist() {
         let tmpdir = tmpdir();
         // symlinks can point to things that don't exist
         symlink(&tmpdir.join("foo"), &tmpdir.join("bar"));
         assert!(readlink(&tmpdir.join("bar")).unwrap() == tmpdir.join("foo"));
     })
 
-    test!(fn readlink_not_symlink() {
+    iotest!(fn readlink_not_symlink() {
         let tmpdir = tmpdir();
         match io::result(|| readlink(&*tmpdir)) {
             Ok(..) => fail!("wanted a failure"),
@@ -1121,7 +1088,7 @@ mod test {
         }
     })
 
-    test!(fn links_work() {
+    iotest!(fn links_work() {
         let tmpdir = tmpdir();
         let input = tmpdir.join("in.txt");
         let out = tmpdir.join("out.txt");
@@ -1147,7 +1114,7 @@ mod test {
         }
     })
 
-    test!(fn chmod_works() {
+    iotest!(fn chmod_works() {
         let tmpdir = tmpdir();
         let file = tmpdir.join("in.txt");
 
@@ -1164,7 +1131,7 @@ mod test {
         chmod(&file, io::UserFile);
     })
 
-    test!(fn sync_doesnt_kill_anything() {
+    iotest!(fn sync_doesnt_kill_anything() {
         let tmpdir = tmpdir();
         let path = tmpdir.join("in.txt");
 
@@ -1177,7 +1144,7 @@ mod test {
         drop(file);
     })
 
-    test!(fn truncate_works() {
+    iotest!(fn truncate_works() {
         let tmpdir = tmpdir();
         let path = tmpdir.join("in.txt");
 
@@ -1208,7 +1175,7 @@ mod test {
         drop(file);
     })
 
-    test!(fn open_flavors() {
+    iotest!(fn open_flavors() {
         let tmpdir = tmpdir();
 
         match io::result(|| File::open_mode(&tmpdir.join("a"), io::Open,
diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs
index bd0b9e08b7c..2d52986294d 100644
--- a/src/libstd/io/mod.rs
+++ b/src/libstd/io/mod.rs
@@ -164,9 +164,6 @@ requests are implemented by descheduling the running task and
 performing an asynchronous request; the task is only resumed once the
 asynchronous request completes.
 
-For blocking (but possibly more efficient) implementations, look
-in the `io::native` module.
-
 # Error Handling
 
 I/O is an area where nearly every operation can result in unexpected
@@ -316,6 +313,9 @@ pub use self::net::udp::UdpStream;
 pub use self::pipe::PipeStream;
 pub use self::process::Process;
 
+/// Various utility functions useful for writing I/O tests
+pub mod test;
+
 /// Synchronous, non-blocking filesystem operations.
 pub mod fs;
 
@@ -349,8 +349,6 @@ pub mod timer;
 /// Buffered I/O wrappers
 pub mod buffered;
 
-pub mod native;
-
 /// Signal handling
 pub mod signal;
 
diff --git a/src/libstd/io/native/file.rs b/src/libstd/io/native/file.rs
deleted file mode 100644
index de2655303d6..00000000000
--- a/src/libstd/io/native/file.rs
+++ /dev/null
@@ -1,965 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Blocking posix-based file I/O
-
-#[allow(non_camel_case_types)];
-
-use c_str::CString;
-use io::IoError;
-use io;
-use libc::c_int;
-use libc;
-use ops::Drop;
-use option::{Some, None, Option};
-use os;
-use path::{Path, GenericPath};
-use ptr::RawPtr;
-use result::{Result, Ok, Err};
-use rt::rtio;
-use super::IoResult;
-use unstable::intrinsics;
-use vec::ImmutableVector;
-use vec;
-
-#[cfg(windows)] use os::win32::{as_utf16_p, fill_utf16_buf_and_decode};
-#[cfg(windows)] use ptr;
-#[cfg(windows)] use str;
-
-fn keep_going(data: &[u8], f: |*u8, uint| -> i64) -> i64 {
-    #[cfg(windows)] static eintr: int = 0; // doesn't matter
-    #[cfg(not(windows))] static eintr: int = libc::EINTR as int;
-
-    let origamt = data.len();
-    let mut data = data.as_ptr();
-    let mut amt = origamt;
-    while amt > 0 {
-        let mut ret;
-        loop {
-            ret = f(data, amt);
-            if cfg!(not(windows)) { break } // windows has no eintr
-            // if we get an eintr, then try again
-            if ret != -1 || os::errno() as int != eintr { break }
-        }
-        if ret == 0 {
-            break
-        } else if ret != -1 {
-            amt -= ret as uint;
-            data = unsafe { data.offset(ret as int) };
-        } else {
-            return ret;
-        }
-    }
-    return (origamt - amt) as i64;
-}
-
-pub type fd_t = libc::c_int;
-
-pub struct FileDesc {
-    priv fd: fd_t,
-    priv close_on_drop: bool,
-}
-
-impl FileDesc {
-    /// Create a `FileDesc` from an open C file descriptor.
-    ///
-    /// The `FileDesc` will take ownership of the specified file descriptor and
-    /// close it upon destruction if the `close_on_drop` flag is true, otherwise
-    /// it will not close the file descriptor when this `FileDesc` is dropped.
-    ///
-    /// Note that all I/O operations done on this object will be *blocking*, but
-    /// they do not require the runtime to be active.
-    pub fn new(fd: fd_t, close_on_drop: bool) -> FileDesc {
-        FileDesc { fd: fd, close_on_drop: close_on_drop }
-    }
-
-    fn inner_read(&mut self, buf: &mut [u8]) -> Result<uint, IoError> {
-        #[cfg(windows)] type rlen = libc::c_uint;
-        #[cfg(not(windows))] type rlen = libc::size_t;
-        let ret = keep_going(buf, |buf, len| {
-            unsafe {
-                libc::read(self.fd, buf as *mut libc::c_void, len as rlen) as i64
-            }
-        });
-        if ret == 0 {
-            Err(io::standard_error(io::EndOfFile))
-        } else if ret < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(ret as uint)
-        }
-    }
-    fn inner_write(&mut self, buf: &[u8]) -> Result<(), IoError> {
-        #[cfg(windows)] type wlen = libc::c_uint;
-        #[cfg(not(windows))] type wlen = libc::size_t;
-        let ret = keep_going(buf, |buf, len| {
-            unsafe {
-                libc::write(self.fd, buf as *libc::c_void, len as wlen) as i64
-            }
-        });
-        if ret < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(())
-        }
-    }
-}
-
-impl io::Reader for FileDesc {
-    fn read(&mut self, buf: &mut [u8]) -> Option<uint> {
-        match self.inner_read(buf) { Ok(n) => Some(n), Err(..) => None }
-    }
-    fn eof(&mut self) -> bool { false }
-}
-
-impl io::Writer for FileDesc {
-    fn write(&mut self, buf: &[u8]) {
-        self.inner_write(buf);
-    }
-}
-
-impl rtio::RtioFileStream for FileDesc {
-    fn read(&mut self, buf: &mut [u8]) -> Result<int, IoError> {
-        self.inner_read(buf).map(|i| i as int)
-    }
-    fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
-        self.inner_write(buf)
-    }
-    fn pread(&mut self, buf: &mut [u8], offset: u64) -> Result<int, IoError> {
-        return os_pread(self.fd, buf.as_ptr(), buf.len(), offset);
-
-        #[cfg(windows)]
-        fn os_pread(fd: c_int, buf: *u8, amt: uint, offset: u64) -> IoResult<int> {
-            unsafe {
-                let mut overlap: libc::OVERLAPPED = intrinsics::init();
-                let handle = libc::get_osfhandle(fd) as libc::HANDLE;
-                let mut bytes_read = 0;
-                overlap.Offset = offset as libc::DWORD;
-                overlap.OffsetHigh = (offset >> 32) as libc::DWORD;
-
-                match libc::ReadFile(handle, buf as libc::LPVOID,
-                                     amt as libc::DWORD,
-                                     &mut bytes_read, &mut overlap) {
-                    0 => Err(super::last_error()),
-                    _ => Ok(bytes_read as int)
-                }
-            }
-        }
-
-        #[cfg(unix)]
-        fn os_pread(fd: c_int, buf: *u8, amt: uint, offset: u64) -> IoResult<int> {
-            match unsafe {
-                libc::pread(fd, buf as *libc::c_void, amt as libc::size_t,
-                            offset as libc::off_t)
-            } {
-                -1 => Err(super::last_error()),
-                n => Ok(n as int)
-            }
-        }
-    }
-    fn pwrite(&mut self, buf: &[u8], offset: u64) -> Result<(), IoError> {
-        return os_pwrite(self.fd, buf.as_ptr(), buf.len(), offset);
-
-        #[cfg(windows)]
-        fn os_pwrite(fd: c_int, buf: *u8, amt: uint, offset: u64) -> IoResult<()> {
-            unsafe {
-                let mut overlap: libc::OVERLAPPED = intrinsics::init();
-                let handle = libc::get_osfhandle(fd) as libc::HANDLE;
-                overlap.Offset = offset as libc::DWORD;
-                overlap.OffsetHigh = (offset >> 32) as libc::DWORD;
-
-                match libc::WriteFile(handle, buf as libc::LPVOID,
-                                      amt as libc::DWORD,
-                                      ptr::mut_null(), &mut overlap) {
-                    0 => Err(super::last_error()),
-                    _ => Ok(()),
-                }
-            }
-        }
-
-        #[cfg(unix)]
-        fn os_pwrite(fd: c_int, buf: *u8, amt: uint, offset: u64) -> IoResult<()> {
-            super::mkerr_libc(unsafe {
-                libc::pwrite(fd, buf as *libc::c_void, amt as libc::size_t,
-                             offset as libc::off_t)
-            } as c_int)
-        }
-    }
-    #[cfg(windows)]
-    fn seek(&mut self, pos: i64, style: io::SeekStyle) -> Result<u64, IoError> {
-        let whence = match style {
-            io::SeekSet => libc::FILE_BEGIN,
-            io::SeekEnd => libc::FILE_END,
-            io::SeekCur => libc::FILE_CURRENT,
-        };
-        unsafe {
-            let handle = libc::get_osfhandle(self.fd) as libc::HANDLE;
-            let mut newpos = 0;
-            match libc::SetFilePointerEx(handle, pos, &mut newpos, whence) {
-                0 => Err(super::last_error()),
-                _ => Ok(newpos as u64),
-            }
-        }
-    }
-    #[cfg(unix)]
-    fn seek(&mut self, pos: i64, whence: io::SeekStyle) -> Result<u64, IoError> {
-        let whence = match whence {
-            io::SeekSet => libc::SEEK_SET,
-            io::SeekEnd => libc::SEEK_END,
-            io::SeekCur => libc::SEEK_CUR,
-        };
-        let n = unsafe { libc::lseek(self.fd, pos as libc::off_t, whence) };
-        if n < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(n as u64)
-        }
-    }
-    fn tell(&self) -> Result<u64, IoError> {
-        let n = unsafe { libc::lseek(self.fd, 0, libc::SEEK_CUR) };
-        if n < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(n as u64)
-        }
-    }
-    fn fsync(&mut self) -> Result<(), IoError> {
-        return os_fsync(self.fd);
-
-        #[cfg(windows)]
-        fn os_fsync(fd: c_int) -> IoResult<()> {
-            super::mkerr_winbool(unsafe {
-                let handle = libc::get_osfhandle(fd);
-                libc::FlushFileBuffers(handle as libc::HANDLE)
-            })
-        }
-        #[cfg(unix)]
-        fn os_fsync(fd: c_int) -> IoResult<()> {
-            super::mkerr_libc(unsafe { libc::fsync(fd) })
-        }
-    }
-    #[cfg(windows)]
-    fn datasync(&mut self) -> Result<(), IoError> { return self.fsync(); }
-
-    #[cfg(not(windows))]
-    fn datasync(&mut self) -> Result<(), IoError> {
-        return super::mkerr_libc(os_datasync(self.fd));
-
-        #[cfg(target_os = "macos")]
-        fn os_datasync(fd: c_int) -> c_int {
-            unsafe { libc::fcntl(fd, libc::F_FULLFSYNC) }
-        }
-        #[cfg(target_os = "linux")]
-        fn os_datasync(fd: c_int) -> c_int { unsafe { libc::fdatasync(fd) } }
-        #[cfg(not(target_os = "macos"), not(target_os = "linux"))]
-        fn os_datasync(fd: c_int) -> c_int { unsafe { libc::fsync(fd) } }
-    }
-
-    #[cfg(windows)]
-    fn truncate(&mut self, offset: i64) -> Result<(), IoError> {
-        let orig_pos = match self.tell() { Ok(i) => i, Err(e) => return Err(e) };
-        match self.seek(offset, io::SeekSet) {
-            Ok(_) => {}, Err(e) => return Err(e),
-        };
-        let ret = unsafe {
-            let handle = libc::get_osfhandle(self.fd) as libc::HANDLE;
-            match libc::SetEndOfFile(handle) {
-                0 => Err(super::last_error()),
-                _ => Ok(())
-            }
-        };
-        self.seek(orig_pos as i64, io::SeekSet);
-        return ret;
-    }
-    #[cfg(unix)]
-    fn truncate(&mut self, offset: i64) -> Result<(), IoError> {
-        super::mkerr_libc(unsafe {
-            libc::ftruncate(self.fd, offset as libc::off_t)
-        })
-    }
-}
-
-impl rtio::RtioPipe for FileDesc {
-    fn read(&mut self, buf: &mut [u8]) -> Result<uint, IoError> {
-        self.inner_read(buf)
-    }
-    fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
-        self.inner_write(buf)
-    }
-}
-
-impl rtio::RtioTTY for FileDesc {
-    fn read(&mut self, buf: &mut [u8]) -> Result<uint, IoError> {
-        self.inner_read(buf)
-    }
-    fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
-        self.inner_write(buf)
-    }
-    fn set_raw(&mut self, _raw: bool) -> Result<(), IoError> {
-        Err(super::unimpl())
-    }
-    fn get_winsize(&mut self) -> Result<(int, int), IoError> {
-        Err(super::unimpl())
-    }
-    fn isatty(&self) -> bool { false }
-}
-
-impl Drop for FileDesc {
-    fn drop(&mut self) {
-        // closing stdio file handles makes no sense, so never do it
-        if self.close_on_drop && self.fd > libc::STDERR_FILENO {
-            unsafe { libc::close(self.fd); }
-        }
-    }
-}
-
-pub struct CFile {
-    priv file: *libc::FILE,
-    priv fd: FileDesc,
-}
-
-impl CFile {
-    /// Create a `CFile` from an open `FILE` pointer.
-    ///
-    /// The `CFile` takes ownership of the `FILE` pointer and will close it upon
-    /// destruction.
-    pub fn new(file: *libc::FILE) -> CFile {
-        CFile {
-            file: file,
-            fd: FileDesc::new(unsafe { libc::fileno(file) }, false)
-        }
-    }
-
-    pub fn flush(&mut self) -> Result<(), IoError> {
-        super::mkerr_libc(unsafe { libc::fflush(self.file) })
-    }
-}
-
-impl rtio::RtioFileStream for CFile {
-    fn read(&mut self, buf: &mut [u8]) -> Result<int, IoError> {
-        let ret = keep_going(buf, |buf, len| {
-            unsafe {
-                libc::fread(buf as *mut libc::c_void, 1, len as libc::size_t,
-                            self.file) as i64
-            }
-        });
-        if ret == 0 {
-            Err(io::standard_error(io::EndOfFile))
-        } else if ret < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(ret as int)
-        }
-    }
-
-    fn write(&mut self, buf: &[u8]) -> Result<(), IoError> {
-        let ret = keep_going(buf, |buf, len| {
-            unsafe {
-                libc::fwrite(buf as *libc::c_void, 1, len as libc::size_t,
-                            self.file) as i64
-            }
-        });
-        if ret < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(())
-        }
-    }
-
-    fn pread(&mut self, buf: &mut [u8], offset: u64) -> Result<int, IoError> {
-        self.flush();
-        self.fd.pread(buf, offset)
-    }
-    fn pwrite(&mut self, buf: &[u8], offset: u64) -> Result<(), IoError> {
-        self.flush();
-        self.fd.pwrite(buf, offset)
-    }
-    fn seek(&mut self, pos: i64, style: io::SeekStyle) -> Result<u64, IoError> {
-        let whence = match style {
-            io::SeekSet => libc::SEEK_SET,
-            io::SeekEnd => libc::SEEK_END,
-            io::SeekCur => libc::SEEK_CUR,
-        };
-        let n = unsafe { libc::fseek(self.file, pos as libc::c_long, whence) };
-        if n < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(n as u64)
-        }
-    }
-    fn tell(&self) -> Result<u64, IoError> {
-        let ret = unsafe { libc::ftell(self.file) };
-        if ret < 0 {
-            Err(super::last_error())
-        } else {
-            Ok(ret as u64)
-        }
-    }
-    fn fsync(&mut self) -> Result<(), IoError> {
-        self.flush();
-        self.fd.fsync()
-    }
-    fn datasync(&mut self) -> Result<(), IoError> {
-        self.flush();
-        self.fd.fsync()
-    }
-    fn truncate(&mut self, offset: i64) -> Result<(), IoError> {
-        self.flush();
-        self.fd.truncate(offset)
-    }
-}
-
-impl Drop for CFile {
-    fn drop(&mut self) {
-        unsafe { libc::fclose(self.file); }
-    }
-}
-
-pub fn open(path: &CString, fm: io::FileMode, fa: io::FileAccess)
-        -> IoResult<FileDesc> {
-    let flags = match fm {
-        io::Open => 0,
-        io::Append => libc::O_APPEND,
-        io::Truncate => libc::O_TRUNC,
-    };
-    // Opening with a write permission must silently create the file.
-    let (flags, mode) = match fa {
-        io::Read => (flags | libc::O_RDONLY, 0),
-        io::Write => (flags | libc::O_WRONLY | libc::O_CREAT,
-                      libc::S_IRUSR | libc::S_IWUSR),
-        io::ReadWrite => (flags | libc::O_RDWR | libc::O_CREAT,
-                          libc::S_IRUSR | libc::S_IWUSR),
-    };
-
-    return match os_open(path, flags, mode) {
-        -1 => Err(super::last_error()),
-        fd => Ok(FileDesc::new(fd, true)),
-    };
-
-    #[cfg(windows)]
-    fn os_open(path: &CString, flags: c_int, mode: c_int) -> c_int {
-        as_utf16_p(path.as_str().unwrap(), |path| {
-            unsafe { libc::wopen(path, flags, mode) }
-        })
-    }
-
-    #[cfg(unix)]
-    fn os_open(path: &CString, flags: c_int, mode: c_int) -> c_int {
-        unsafe { libc::open(path.with_ref(|p| p), flags, mode) }
-    }
-}
-
-pub fn mkdir(p: &CString, mode: io::FilePermission) -> IoResult<()> {
-    return os_mkdir(p, mode as c_int);
-
-    #[cfg(windows)]
-    fn os_mkdir(p: &CString, _mode: c_int) -> IoResult<()> {
-        super::mkerr_winbool(unsafe {
-            // FIXME: turn mode into something useful? #2623
-            as_utf16_p(p.as_str().unwrap(), |buf| {
-                libc::CreateDirectoryW(buf, ptr::mut_null())
-            })
-        })
-    }
-
-    #[cfg(unix)]
-    fn os_mkdir(p: &CString, mode: c_int) -> IoResult<()> {
-        super::mkerr_libc(unsafe {
-            libc::mkdir(p.with_ref(|p| p), mode as libc::mode_t)
-        })
-    }
-}
-
-pub fn readdir(p: &CString) -> IoResult<~[Path]> {
-    fn prune(root: &CString, dirs: ~[Path]) -> ~[Path] {
-        let root = unsafe { CString::new(root.with_ref(|p| p), false) };
-        let root = Path::new(root);
-
-        dirs.move_iter().filter(|path| {
-            path.as_vec() != bytes!(".") && path.as_vec() != bytes!("..")
-        }).map(|path| root.join(path)).collect()
-    }
-
-    unsafe {
-        #[cfg(not(windows))]
-        unsafe fn get_list(p: &CString) -> IoResult<~[Path]> {
-            use libc::{dirent_t};
-            use libc::{opendir, readdir, closedir};
-            extern {
-                fn rust_list_dir_val(ptr: *dirent_t) -> *libc::c_char;
-            }
-            debug!("os::list_dir -- BEFORE OPENDIR");
-
-            let dir_ptr = p.with_ref(|buf| opendir(buf));
-
-            if (dir_ptr as uint != 0) {
-                let mut paths = ~[];
-                debug!("os::list_dir -- opendir() SUCCESS");
-                let mut entry_ptr = readdir(dir_ptr);
-                while (entry_ptr as uint != 0) {
-                    let cstr = CString::new(rust_list_dir_val(entry_ptr), false);
-                    paths.push(Path::new(cstr));
-                    entry_ptr = readdir(dir_ptr);
-                }
-                closedir(dir_ptr);
-                Ok(paths)
-            } else {
-                Err(super::last_error())
-            }
-        }
-
-        #[cfg(windows)]
-        unsafe fn get_list(p: &CString) -> IoResult<~[Path]> {
-            use libc::consts::os::extra::INVALID_HANDLE_VALUE;
-            use libc::{wcslen, free};
-            use libc::funcs::extra::kernel32::{
-                FindFirstFileW,
-                FindNextFileW,
-                FindClose,
-            };
-            use libc::types::os::arch::extra::HANDLE;
-            use os::win32::{
-                as_utf16_p
-            };
-            use rt::global_heap::malloc_raw;
-
-            #[nolink]
-            extern {
-                fn rust_list_dir_wfd_size() -> libc::size_t;
-                fn rust_list_dir_wfd_fp_buf(wfd: *libc::c_void) -> *u16;
-            }
-            let p = CString::new(p.with_ref(|p| p), false);
-            let p = Path::new(p);
-            let star = p.join("*");
-            as_utf16_p(star.as_str().unwrap(), |path_ptr| {
-                let wfd_ptr = malloc_raw(rust_list_dir_wfd_size() as uint);
-                let find_handle = FindFirstFileW(path_ptr, wfd_ptr as HANDLE);
-                if find_handle as libc::c_int != INVALID_HANDLE_VALUE {
-                    let mut paths = ~[];
-                    let mut more_files = 1 as libc::c_int;
-                    while more_files != 0 {
-                        let fp_buf = rust_list_dir_wfd_fp_buf(wfd_ptr);
-                        if fp_buf as uint == 0 {
-                            fail!("os::list_dir() failure: got null ptr from wfd");
-                        }
-                        else {
-                            let fp_vec = vec::from_buf(
-                                fp_buf, wcslen(fp_buf) as uint);
-                            let fp_str = str::from_utf16(fp_vec);
-                            paths.push(Path::new(fp_str));
-                        }
-                        more_files = FindNextFileW(find_handle, wfd_ptr as HANDLE);
-                    }
-                    FindClose(find_handle);
-                    free(wfd_ptr);
-                    Ok(paths)
-                } else {
-                    Err(super::last_error())
-                }
-            })
-        }
-
-        get_list(p).map(|paths| prune(p, paths))
-    }
-}
-
-pub fn unlink(p: &CString) -> IoResult<()> {
-    return os_unlink(p);
-
-    #[cfg(windows)]
-    fn os_unlink(p: &CString) -> IoResult<()> {
-        super::mkerr_winbool(unsafe {
-            as_utf16_p(p.as_str().unwrap(), |buf| {
-                libc::DeleteFileW(buf)
-            })
-        })
-    }
-
-    #[cfg(unix)]
-    fn os_unlink(p: &CString) -> IoResult<()> {
-        super::mkerr_libc(unsafe { libc::unlink(p.with_ref(|p| p)) })
-    }
-}
-
-pub fn rename(old: &CString, new: &CString) -> IoResult<()> {
-    return os_rename(old, new);
-
-    #[cfg(windows)]
-    fn os_rename(old: &CString, new: &CString) -> IoResult<()> {
-        super::mkerr_winbool(unsafe {
-            as_utf16_p(old.as_str().unwrap(), |old| {
-                as_utf16_p(new.as_str().unwrap(), |new| {
-                    libc::MoveFileExW(old, new, libc::MOVEFILE_REPLACE_EXISTING)
-                })
-            })
-        })
-    }
-
-    #[cfg(unix)]
-    fn os_rename(old: &CString, new: &CString) -> IoResult<()> {
-        super::mkerr_libc(unsafe {
-            libc::rename(old.with_ref(|p| p), new.with_ref(|p| p))
-        })
-    }
-}
-
-pub fn chmod(p: &CString, mode: io::FilePermission) -> IoResult<()> {
-    return super::mkerr_libc(os_chmod(p, mode as c_int));
-
-    #[cfg(windows)]
-    fn os_chmod(p: &CString, mode: c_int) -> c_int {
-        unsafe {
-            as_utf16_p(p.as_str().unwrap(), |p| libc::wchmod(p, mode))
-        }
-    }
-
-    #[cfg(unix)]
-    fn os_chmod(p: &CString, mode: c_int) -> c_int {
-        unsafe { libc::chmod(p.with_ref(|p| p), mode as libc::mode_t) }
-    }
-}
-
-pub fn rmdir(p: &CString) -> IoResult<()> {
-    return super::mkerr_libc(os_rmdir(p));
-
-    #[cfg(windows)]
-    fn os_rmdir(p: &CString) -> c_int {
-        unsafe {
-            as_utf16_p(p.as_str().unwrap(), |p| libc::wrmdir(p))
-        }
-    }
-
-    #[cfg(unix)]
-    fn os_rmdir(p: &CString) -> c_int {
-        unsafe { libc::rmdir(p.with_ref(|p| p)) }
-    }
-}
-
-pub fn chown(p: &CString, uid: int, gid: int) -> IoResult<()> {
-    return super::mkerr_libc(os_chown(p, uid, gid));
-
-    // libuv has this as a no-op, so seems like this should as well?
-    #[cfg(windows)]
-    fn os_chown(_p: &CString, _uid: int, _gid: int) -> c_int { 0 }
-
-    #[cfg(unix)]
-    fn os_chown(p: &CString, uid: int, gid: int) -> c_int {
-        unsafe {
-            libc::chown(p.with_ref(|p| p), uid as libc::uid_t,
-                        gid as libc::gid_t)
-        }
-    }
-}
-
-pub fn readlink(p: &CString) -> IoResult<Path> {
-    return os_readlink(p);
-
-    // XXX: I have a feeling that this reads intermediate symlinks as well.
-    #[cfg(windows)]
-    fn os_readlink(p: &CString) -> IoResult<Path> {
-        let handle = unsafe {
-            as_utf16_p(p.as_str().unwrap(), |p| {
-                libc::CreateFileW(p,
-                                  libc::GENERIC_READ,
-                                  libc::FILE_SHARE_READ,
-                                  ptr::mut_null(),
-                                  libc::OPEN_EXISTING,
-                                  libc::FILE_ATTRIBUTE_NORMAL,
-                                  ptr::mut_null())
-            })
-        };
-        if handle == ptr::mut_null() { return Err(super::last_error()) }
-        let ret = fill_utf16_buf_and_decode(|buf, sz| {
-            unsafe {
-                libc::GetFinalPathNameByHandleW(handle, buf as *u16, sz,
-                                                libc::VOLUME_NAME_NT)
-            }
-        });
-        let ret = match ret {
-            Some(s) => Ok(Path::new(s)),
-            None => Err(super::last_error()),
-        };
-        unsafe { libc::CloseHandle(handle) };
-        return ret;
-
-    }
-
-    #[cfg(unix)]
-    fn os_readlink(p: &CString) -> IoResult<Path> {
-        let p = p.with_ref(|p| p);
-        let mut len = unsafe { libc::pathconf(p, libc::_PC_NAME_MAX) };
-        if len == -1 {
-            len = 1024; // XXX: read PATH_MAX from C ffi?
-        }
-        let mut buf = vec::with_capacity::<u8>(len as uint);
-        match unsafe {
-            libc::readlink(p, buf.as_ptr() as *mut libc::c_char,
-                           len as libc::size_t)
-        } {
-            -1 => Err(super::last_error()),
-            n => {
-                assert!(n > 0);
-                unsafe { buf.set_len(n as uint); }
-                Ok(Path::new(buf))
-            }
-        }
-    }
-}
-
-pub fn symlink(src: &CString, dst: &CString) -> IoResult<()> {
-    return os_symlink(src, dst);
-
-    #[cfg(windows)]
-    fn os_symlink(src: &CString, dst: &CString) -> IoResult<()> {
-        super::mkerr_winbool(as_utf16_p(src.as_str().unwrap(), |src| {
-            as_utf16_p(dst.as_str().unwrap(), |dst| {
-                unsafe { libc::CreateSymbolicLinkW(dst, src, 0) }
-            })
-        }))
-    }
-
-    #[cfg(unix)]
-    fn os_symlink(src: &CString, dst: &CString) -> IoResult<()> {
-        super::mkerr_libc(unsafe {
-            libc::symlink(src.with_ref(|p| p), dst.with_ref(|p| p))
-        })
-    }
-}
-
-pub fn link(src: &CString, dst: &CString) -> IoResult<()> {
-    return os_link(src, dst);
-
-    #[cfg(windows)]
-    fn os_link(src: &CString, dst: &CString) -> IoResult<()> {
-        super::mkerr_winbool(as_utf16_p(src.as_str().unwrap(), |src| {
-            as_utf16_p(dst.as_str().unwrap(), |dst| {
-                unsafe { libc::CreateHardLinkW(dst, src, ptr::mut_null()) }
-            })
-        }))
-    }
-
-    #[cfg(unix)]
-    fn os_link(src: &CString, dst: &CString) -> IoResult<()> {
-        super::mkerr_libc(unsafe {
-            libc::link(src.with_ref(|p| p), dst.with_ref(|p| p))
-        })
-    }
-}
-
-#[cfg(windows)]
-fn mkstat(stat: &libc::stat, path: &CString) -> io::FileStat {
-    let path = unsafe { CString::new(path.with_ref(|p| p), false) };
-    let kind = match (stat.st_mode as c_int) & libc::S_IFMT {
-        libc::S_IFREG => io::TypeFile,
-        libc::S_IFDIR => io::TypeDirectory,
-        libc::S_IFIFO => io::TypeNamedPipe,
-        libc::S_IFBLK => io::TypeBlockSpecial,
-        libc::S_IFLNK => io::TypeSymlink,
-        _ => io::TypeUnknown,
-    };
-
-    io::FileStat {
-        path: Path::new(path),
-        size: stat.st_size as u64,
-        kind: kind,
-        perm: (stat.st_mode) as io::FilePermission & io::AllPermissions,
-        created: stat.st_ctime as u64,
-        modified: stat.st_mtime as u64,
-        accessed: stat.st_atime as u64,
-        unstable: io::UnstableFileStat {
-            device: stat.st_dev as u64,
-            inode: stat.st_ino as u64,
-            rdev: stat.st_rdev as u64,
-            nlink: stat.st_nlink as u64,
-            uid: stat.st_uid as u64,
-            gid: stat.st_gid as u64,
-            blksize: 0,
-            blocks: 0,
-            flags: 0,
-            gen: 0,
-        }
-    }
-}
-
-#[cfg(unix)]
-fn mkstat(stat: &libc::stat, path: &CString) -> io::FileStat {
-    let path = unsafe { CString::new(path.with_ref(|p| p), false) };
-
-    // FileStat times are in milliseconds
-    fn mktime(secs: u64, nsecs: u64) -> u64 { secs * 1000 + nsecs / 1000000 }
-
-    let kind = match (stat.st_mode as c_int) & libc::S_IFMT {
-        libc::S_IFREG => io::TypeFile,
-        libc::S_IFDIR => io::TypeDirectory,
-        libc::S_IFIFO => io::TypeNamedPipe,
-        libc::S_IFBLK => io::TypeBlockSpecial,
-        libc::S_IFLNK => io::TypeSymlink,
-        _ => io::TypeUnknown,
-    };
-
-    #[cfg(not(target_os = "linux"), not(target_os = "android"))]
-    fn flags(stat: &libc::stat) -> u64 { stat.st_flags as u64 }
-    #[cfg(target_os = "linux")] #[cfg(target_os = "android")]
-    fn flags(_stat: &libc::stat) -> u64 { 0 }
-
-    #[cfg(not(target_os = "linux"), not(target_os = "android"))]
-    fn gen(stat: &libc::stat) -> u64 { stat.st_gen as u64 }
-    #[cfg(target_os = "linux")] #[cfg(target_os = "android")]
-    fn gen(_stat: &libc::stat) -> u64 { 0 }
-
-    io::FileStat {
-        path: Path::new(path),
-        size: stat.st_size as u64,
-        kind: kind,
-        perm: (stat.st_mode) as io::FilePermission & io::AllPermissions,
-        created: mktime(stat.st_ctime as u64, stat.st_ctime_nsec as u64),
-        modified: mktime(stat.st_mtime as u64, stat.st_mtime_nsec as u64),
-        accessed: mktime(stat.st_atime as u64, stat.st_atime_nsec as u64),
-        unstable: io::UnstableFileStat {
-            device: stat.st_dev as u64,
-            inode: stat.st_ino as u64,
-            rdev: stat.st_rdev as u64,
-            nlink: stat.st_nlink as u64,
-            uid: stat.st_uid as u64,
-            gid: stat.st_gid as u64,
-            blksize: stat.st_blksize as u64,
-            blocks: stat.st_blocks as u64,
-            flags: flags(stat),
-            gen: gen(stat),
-        }
-    }
-}
-
-pub fn stat(p: &CString) -> IoResult<io::FileStat> {
-    return os_stat(p);
-
-    #[cfg(windows)]
-    fn os_stat(p: &CString) -> IoResult<io::FileStat> {
-        let mut stat: libc::stat = unsafe { intrinsics::uninit() };
-        as_utf16_p(p.as_str().unwrap(), |up| {
-            match unsafe { libc::wstat(up, &mut stat) } {
-                0 => Ok(mkstat(&stat, p)),
-                _ => Err(super::last_error()),
-            }
-        })
-    }
-
-    #[cfg(unix)]
-    fn os_stat(p: &CString) -> IoResult<io::FileStat> {
-        let mut stat: libc::stat = unsafe { intrinsics::uninit() };
-        match unsafe { libc::stat(p.with_ref(|p| p), &mut stat) } {
-            0 => Ok(mkstat(&stat, p)),
-            _ => Err(super::last_error()),
-        }
-    }
-}
-
-pub fn lstat(p: &CString) -> IoResult<io::FileStat> {
-    return os_lstat(p);
-
-    // XXX: windows implementation is missing
-    #[cfg(windows)]
-    fn os_lstat(_p: &CString) -> IoResult<io::FileStat> {
-        Err(super::unimpl())
-    }
-
-    #[cfg(unix)]
-    fn os_lstat(p: &CString) -> IoResult<io::FileStat> {
-        let mut stat: libc::stat = unsafe { intrinsics::uninit() };
-        match unsafe { libc::lstat(p.with_ref(|p| p), &mut stat) } {
-            0 => Ok(mkstat(&stat, p)),
-            _ => Err(super::last_error()),
-        }
-    }
-}
-
-pub fn utime(p: &CString, atime: u64, mtime: u64) -> IoResult<()> {
-    return super::mkerr_libc(os_utime(p, atime, mtime));
-
-    #[cfg(windows)]
-    fn os_utime(p: &CString, atime: u64, mtime: u64) -> c_int {
-        let buf = libc::utimbuf {
-            actime: (atime / 1000) as libc::time64_t,
-            modtime: (mtime / 1000) as libc::time64_t,
-        };
-        unsafe {
-            as_utf16_p(p.as_str().unwrap(), |p| libc::wutime(p, &buf))
-        }
-    }
-
-    #[cfg(unix)]
-    fn os_utime(p: &CString, atime: u64, mtime: u64) -> c_int {
-        let buf = libc::utimbuf {
-            actime: (atime / 1000) as libc::time_t,
-            modtime: (mtime / 1000) as libc::time_t,
-        };
-        unsafe { libc::utime(p.with_ref(|p| p), &buf) }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use io::native::file::{CFile, FileDesc};
-    use io;
-    use libc;
-    use os;
-    use result::Ok;
-    use rt::rtio::RtioFileStream;
-
-    #[ignore(cfg(target_os = "freebsd"))] // hmm, maybe pipes have a tiny buffer
-    #[test]
-    fn test_file_desc() {
-        // Run this test with some pipes so we don't have to mess around with
-        // opening or closing files.
-        unsafe {
-            let os::Pipe { input, out } = os::pipe();
-            let mut reader = FileDesc::new(input, true);
-            let mut writer = FileDesc::new(out, true);
-
-            writer.inner_write(bytes!("test"));
-            let mut buf = [0u8, ..4];
-            match reader.inner_read(buf) {
-                Ok(4) => {
-                    assert_eq!(buf[0], 't' as u8);
-                    assert_eq!(buf[1], 'e' as u8);
-                    assert_eq!(buf[2], 's' as u8);
-                    assert_eq!(buf[3], 't' as u8);
-                }
-                r => fail!("invalid read: {:?}", r)
-            }
-
-            assert!(writer.inner_read(buf).is_err());
-            assert!(reader.inner_write(buf).is_err());
-        }
-    }
-
-    #[ignore(cfg(windows))] // apparently windows doesn't like tmpfile
-    #[test]
-    fn test_cfile() {
-        unsafe {
-            let f = libc::tmpfile();
-            assert!(!f.is_null());
-            let mut file = CFile::new(f);
-
-            file.write(bytes!("test"));
-            let mut buf = [0u8, ..4];
-            file.seek(0, io::SeekSet);
-            match file.read(buf) {
-                Ok(4) => {
-                    assert_eq!(buf[0], 't' as u8);
-                    assert_eq!(buf[1], 'e' as u8);
-                    assert_eq!(buf[2], 's' as u8);
-                    assert_eq!(buf[3], 't' as u8);
-                }
-                r => fail!("invalid read: {:?}", r)
-            }
-        }
-    }
-}
diff --git a/src/libstd/io/native/mod.rs b/src/libstd/io/native/mod.rs
deleted file mode 100644
index d9dccc84f1c..00000000000
--- a/src/libstd/io/native/mod.rs
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Native thread-blocking I/O implementation
-//!
-//! This module contains the implementation of native thread-blocking
-//! implementations of I/O on all platforms. This module is not intended to be
-//! used directly, but rather the rust runtime will fall back to using it if
-//! necessary.
-//!
-//! Rust code normally runs inside of green tasks with a local scheduler using
-//! asynchronous I/O to cooperate among tasks. This model is not always
-//! available, however, and that's where these native implementations come into
-//! play. The only dependencies of these modules are the normal system libraries
-//! that you would find on the respective platform.
-
-use c_str::CString;
-use comm::SharedChan;
-use libc::c_int;
-use libc;
-use option::{Option, None, Some};
-use os;
-use path::Path;
-use result::{Result, Ok, Err};
-use rt::rtio;
-use rt::rtio::{RtioTcpStream, RtioTcpListener, RtioUdpSocket, RtioUnixListener,
-               RtioPipe, RtioFileStream, RtioProcess, RtioSignal, RtioTTY,
-               CloseBehavior, RtioTimer};
-use io;
-use io::IoError;
-use io::net::ip::SocketAddr;
-use io::process::ProcessConfig;
-use io::signal::Signum;
-use ai = io::net::addrinfo;
-
-// Local re-exports
-pub use self::file::FileDesc;
-pub use self::process::Process;
-
-// Native I/O implementations
-pub mod file;
-pub mod process;
-
-type IoResult<T> = Result<T, IoError>;
-
-fn unimpl() -> IoError {
-    IoError {
-        kind: io::IoUnavailable,
-        desc: "unimplemented I/O interface",
-        detail: None,
-    }
-}
-
-fn last_error() -> IoError {
-    #[cfg(windows)]
-    fn get_err(errno: i32) -> (io::IoErrorKind, &'static str) {
-        match errno {
-            libc::EOF => (io::EndOfFile, "end of file"),
-            _ => (io::OtherIoError, "unknown error"),
-        }
-    }
-
-    #[cfg(not(windows))]
-    fn get_err(errno: i32) -> (io::IoErrorKind, &'static str) {
-        // XXX: this should probably be a bit more descriptive...
-        match errno {
-            libc::EOF => (io::EndOfFile, "end of file"),
-
-            // These two constants can have the same value on some systems, but
-            // different values on others, so we can't use a match clause
-            x if x == libc::EAGAIN || x == libc::EWOULDBLOCK =>
-                (io::ResourceUnavailable, "resource temporarily unavailable"),
-
-            _ => (io::OtherIoError, "unknown error"),
-        }
-    }
-
-    let (kind, desc) = get_err(os::errno() as i32);
-    IoError {
-        kind: kind,
-        desc: desc,
-        detail: Some(os::last_os_error())
-    }
-}
-
-// unix has nonzero values as errors
-fn mkerr_libc(ret: libc::c_int) -> IoResult<()> {
-    if ret != 0 {
-        Err(last_error())
-    } else {
-        Ok(())
-    }
-}
-
-// windows has zero values as errors
-#[cfg(windows)]
-fn mkerr_winbool(ret: libc::c_int) -> IoResult<()> {
-    if ret == 0 {
-        Err(last_error())
-    } else {
-        Ok(())
-    }
-}
-
-/// Implementation of rt::rtio's IoFactory trait to generate handles to the
-/// native I/O functionality.
-pub struct IoFactory;
-
-impl rtio::IoFactory for IoFactory {
-    // networking
-    fn tcp_connect(&mut self, _addr: SocketAddr) -> IoResult<~RtioTcpStream> {
-        Err(unimpl())
-    }
-    fn tcp_bind(&mut self, _addr: SocketAddr) -> IoResult<~RtioTcpListener> {
-        Err(unimpl())
-    }
-    fn udp_bind(&mut self, _addr: SocketAddr) -> IoResult<~RtioUdpSocket> {
-        Err(unimpl())
-    }
-    fn unix_bind(&mut self, _path: &CString) -> IoResult<~RtioUnixListener> {
-        Err(unimpl())
-    }
-    fn unix_connect(&mut self, _path: &CString) -> IoResult<~RtioPipe> {
-        Err(unimpl())
-    }
-    fn get_host_addresses(&mut self, _host: Option<&str>, _servname: Option<&str>,
-                          _hint: Option<ai::Hint>) -> IoResult<~[ai::Info]> {
-        Err(unimpl())
-    }
-
-    // filesystem operations
-    fn fs_from_raw_fd(&mut self, fd: c_int,
-                      close: CloseBehavior) -> ~RtioFileStream {
-        let close = match close {
-            rtio::CloseSynchronously | rtio::CloseAsynchronously => true,
-            rtio::DontClose => false
-        };
-        ~file::FileDesc::new(fd, close) as ~RtioFileStream
-    }
-    fn fs_open(&mut self, path: &CString, fm: io::FileMode, fa: io::FileAccess)
-        -> IoResult<~RtioFileStream> {
-        file::open(path, fm, fa).map(|fd| ~fd as ~RtioFileStream)
-    }
-    fn fs_unlink(&mut self, path: &CString) -> IoResult<()> {
-        file::unlink(path)
-    }
-    fn fs_stat(&mut self, path: &CString) -> IoResult<io::FileStat> {
-        file::stat(path)
-    }
-    fn fs_mkdir(&mut self, path: &CString,
-                mode: io::FilePermission) -> IoResult<()> {
-        file::mkdir(path, mode)
-    }
-    fn fs_chmod(&mut self, path: &CString,
-                mode: io::FilePermission) -> IoResult<()> {
-        file::chmod(path, mode)
-    }
-    fn fs_rmdir(&mut self, path: &CString) -> IoResult<()> {
-        file::rmdir(path)
-    }
-    fn fs_rename(&mut self, path: &CString, to: &CString) -> IoResult<()> {
-        file::rename(path, to)
-    }
-    fn fs_readdir(&mut self, path: &CString, _flags: c_int) -> IoResult<~[Path]> {
-        file::readdir(path)
-    }
-    fn fs_lstat(&mut self, path: &CString) -> IoResult<io::FileStat> {
-        file::lstat(path)
-    }
-    fn fs_chown(&mut self, path: &CString, uid: int, gid: int) -> IoResult<()> {
-        file::chown(path, uid, gid)
-    }
-    fn fs_readlink(&mut self, path: &CString) -> IoResult<Path> {
-        file::readlink(path)
-    }
-    fn fs_symlink(&mut self, src: &CString, dst: &CString) -> IoResult<()> {
-        file::symlink(src, dst)
-    }
-    fn fs_link(&mut self, src: &CString, dst: &CString) -> IoResult<()> {
-        file::link(src, dst)
-    }
-    fn fs_utime(&mut self, src: &CString, atime: u64,
-                mtime: u64) -> IoResult<()> {
-        file::utime(src, atime, mtime)
-    }
-
-    // misc
-    fn timer_init(&mut self) -> IoResult<~RtioTimer> {
-        Err(unimpl())
-    }
-    fn spawn(&mut self, config: ProcessConfig)
-            -> IoResult<(~RtioProcess, ~[Option<~RtioPipe>])> {
-        process::Process::spawn(config).map(|(p, io)| {
-            (~p as ~RtioProcess,
-             io.move_iter().map(|p| p.map(|p| ~p as ~RtioPipe)).collect())
-        })
-    }
-    fn pipe_open(&mut self, fd: c_int) -> IoResult<~RtioPipe> {
-        Ok(~file::FileDesc::new(fd, true) as ~RtioPipe)
-    }
-    fn tty_open(&mut self, fd: c_int, _readable: bool) -> IoResult<~RtioTTY> {
-        if unsafe { libc::isatty(fd) } != 0 {
-            // Don't ever close the stdio file descriptors, nothing good really
-            // comes of that.
-            Ok(~file::FileDesc::new(fd, fd > libc::STDERR_FILENO) as ~RtioTTY)
-        } else {
-            Err(IoError {
-                kind: io::MismatchedFileTypeForOperation,
-                desc: "file descriptor is not a TTY",
-                detail: None,
-            })
-        }
-    }
-    fn signal(&mut self, _signal: Signum, _channel: SharedChan<Signum>)
-        -> IoResult<~RtioSignal> {
-        Err(unimpl())
-    }
-}
-
-pub static mut NATIVE_IO_FACTORY: IoFactory = IoFactory;
-
diff --git a/src/libstd/io/native/process.rs b/src/libstd/io/native/process.rs
deleted file mode 100644
index ef972dc4d0a..00000000000
--- a/src/libstd/io/native/process.rs
+++ /dev/null
@@ -1,654 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use io;
-use libc::{pid_t, c_void, c_int};
-use libc;
-use os;
-use prelude::*;
-use ptr;
-use rt::rtio;
-use super::file;
-#[cfg(windows)]
-use cast;
-
-use p = io::process;
-
-/**
- * A value representing a child process.
- *
- * The lifetime of this value is linked to the lifetime of the actual
- * process - the Process destructor calls self.finish() which waits
- * for the process to terminate.
- */
-pub struct Process {
-    /// The unique id of the process (this should never be negative).
-    priv pid: pid_t,
-
-    /// A handle to the process - on unix this will always be NULL, but on
-    /// windows it will be a HANDLE to the process, which will prevent the
-    /// pid being re-used until the handle is closed.
-    priv handle: *(),
-
-    /// None until finish() is called.
-    priv exit_code: Option<int>,
-}
-
-impl Process {
-    /// Creates a new process using native process-spawning abilities provided
-    /// by the OS. Operations on this process will be blocking instead of using
-    /// the runtime for sleeping just this current task.
-    ///
-    /// # Arguments
-    ///
-    /// * prog - the program to run
-    /// * args - the arguments to pass to the program, not including the program
-    ///          itself
-    /// * env - an optional environment to specify for the child process. If
-    ///         this value is `None`, then the child will inherit the parent's
-    ///         environment
-    /// * cwd - an optionally specified current working directory of the child,
-    ///         defaulting to the parent's current working directory
-    /// * stdin, stdout, stderr - These optionally specified file descriptors
-    ///     dictate where the stdin/out/err of the child process will go. If
-    ///     these are `None`, then this module will bind the input/output to an
-    ///     os pipe instead. This process takes ownership of these file
-    ///     descriptors, closing them upon destruction of the process.
-    pub fn spawn(config: p::ProcessConfig)
-        -> Result<(Process, ~[Option<file::FileDesc>]), io::IoError>
-    {
-        // right now we only handle stdin/stdout/stderr.
-        if config.io.len() > 3 {
-            return Err(super::unimpl());
-        }
-
-        fn get_io(io: &[p::StdioContainer],
-                  ret: &mut ~[Option<file::FileDesc>],
-                  idx: uint) -> (Option<os::Pipe>, c_int) {
-            if idx >= io.len() { return (None, -1); }
-            ret.push(None);
-            match io[idx] {
-                p::Ignored => (None, -1),
-                p::InheritFd(fd) => (None, fd),
-                p::CreatePipe(readable, _writable) => {
-                    let pipe = os::pipe();
-                    let (theirs, ours) = if readable {
-                        (pipe.input, pipe.out)
-                    } else {
-                        (pipe.out, pipe.input)
-                    };
-                    ret[idx] = Some(file::FileDesc::new(ours, true));
-                    (Some(pipe), theirs)
-                }
-            }
-        }
-
-        let mut ret_io = ~[];
-        let (in_pipe, in_fd) = get_io(config.io, &mut ret_io, 0);
-        let (out_pipe, out_fd) = get_io(config.io, &mut ret_io, 1);
-        let (err_pipe, err_fd) = get_io(config.io, &mut ret_io, 2);
-
-        let env = config.env.map(|a| a.to_owned());
-        let cwd = config.cwd.map(|a| Path::new(a));
-        let res = spawn_process_os(config.program, config.args, env,
-                                   cwd.as_ref(), in_fd, out_fd, err_fd);
-
-        unsafe {
-            for pipe in in_pipe.iter() { libc::close(pipe.input); }
-            for pipe in out_pipe.iter() { libc::close(pipe.out); }
-            for pipe in err_pipe.iter() { libc::close(pipe.out); }
-        }
-
-        Ok((Process { pid: res.pid, handle: res.handle, exit_code: None }, ret_io))
-    }
-}
-
-impl rtio::RtioProcess for Process {
-    fn id(&self) -> pid_t { self.pid }
-
-    fn wait(&mut self) -> p::ProcessExit {
-        let code = match self.exit_code {
-            Some(code) => code,
-            None => {
-                let code = waitpid(self.pid);
-                self.exit_code = Some(code);
-                code
-            }
-        };
-        return p::ExitStatus(code); // XXX: this is wrong
-    }
-
-    fn kill(&mut self, signum: int) -> Result<(), io::IoError> {
-        // if the process has finished, and therefore had waitpid called,
-        // and we kill it, then on unix we might ending up killing a
-        // newer process that happens to have the same (re-used) id
-        match self.exit_code {
-            Some(..) => return Err(io::IoError {
-                kind: io::OtherIoError,
-                desc: "can't kill an exited process",
-                detail: None,
-            }),
-            None => {}
-        }
-        return unsafe { killpid(self.pid, signum) };
-
-        #[cfg(windows)]
-        unsafe fn killpid(pid: pid_t, signal: int) -> Result<(), io::IoError> {
-            match signal {
-                io::process::PleaseExitSignal | io::process::MustDieSignal => {
-                    libc::funcs::extra::kernel32::TerminateProcess(
-                        cast::transmute(pid), 1);
-                    Ok(())
-                }
-                _ => Err(io::IoError {
-                    kind: io::OtherIoError,
-                    desc: "unsupported signal on windows",
-                    detail: None,
-                })
-            }
-        }
-
-        #[cfg(not(windows))]
-        unsafe fn killpid(pid: pid_t, signal: int) -> Result<(), io::IoError> {
-            libc::funcs::posix88::signal::kill(pid, signal as c_int);
-            Ok(())
-        }
-    }
-}
-
-impl Drop for Process {
-    fn drop(&mut self) {
-        free_handle(self.handle);
-    }
-}
-
-struct SpawnProcessResult {
-    pid: pid_t,
-    handle: *(),
-}
-
-#[cfg(windows)]
-fn spawn_process_os(prog: &str, args: &[~str],
-                    env: Option<~[(~str, ~str)]>,
-                    dir: Option<&Path>,
-                    in_fd: c_int, out_fd: c_int, err_fd: c_int) -> SpawnProcessResult {
-    use libc::types::os::arch::extra::{DWORD, HANDLE, STARTUPINFO};
-    use libc::consts::os::extra::{
-        TRUE, FALSE,
-        STARTF_USESTDHANDLES,
-        INVALID_HANDLE_VALUE,
-        DUPLICATE_SAME_ACCESS
-    };
-    use libc::funcs::extra::kernel32::{
-        GetCurrentProcess,
-        DuplicateHandle,
-        CloseHandle,
-        CreateProcessA
-    };
-    use libc::funcs::extra::msvcrt::get_osfhandle;
-
-    use mem;
-
-    unsafe {
-
-        let mut si = zeroed_startupinfo();
-        si.cb = mem::size_of::<STARTUPINFO>() as DWORD;
-        si.dwFlags = STARTF_USESTDHANDLES;
-
-        let cur_proc = GetCurrentProcess();
-
-        let orig_std_in = get_osfhandle(in_fd) as HANDLE;
-        if orig_std_in == INVALID_HANDLE_VALUE as HANDLE {
-            fail!("failure in get_osfhandle: {}", os::last_os_error());
-        }
-        if DuplicateHandle(cur_proc, orig_std_in, cur_proc, &mut si.hStdInput,
-                           0, TRUE, DUPLICATE_SAME_ACCESS) == FALSE {
-            fail!("failure in DuplicateHandle: {}", os::last_os_error());
-        }
-
-        let orig_std_out = get_osfhandle(out_fd) as HANDLE;
-        if orig_std_out == INVALID_HANDLE_VALUE as HANDLE {
-            fail!("failure in get_osfhandle: {}", os::last_os_error());
-        }
-        if DuplicateHandle(cur_proc, orig_std_out, cur_proc, &mut si.hStdOutput,
-                           0, TRUE, DUPLICATE_SAME_ACCESS) == FALSE {
-            fail!("failure in DuplicateHandle: {}", os::last_os_error());
-        }
-
-        let orig_std_err = get_osfhandle(err_fd) as HANDLE;
-        if orig_std_err == INVALID_HANDLE_VALUE as HANDLE {
-            fail!("failure in get_osfhandle: {}", os::last_os_error());
-        }
-        if DuplicateHandle(cur_proc, orig_std_err, cur_proc, &mut si.hStdError,
-                           0, TRUE, DUPLICATE_SAME_ACCESS) == FALSE {
-            fail!("failure in DuplicateHandle: {}", os::last_os_error());
-        }
-
-        let cmd = make_command_line(prog, args);
-        let mut pi = zeroed_process_information();
-        let mut create_err = None;
-
-        with_envp(env, |envp| {
-            with_dirp(dir, |dirp| {
-                cmd.with_c_str(|cmdp| {
-                    let created = CreateProcessA(ptr::null(), cast::transmute(cmdp),
-                                                 ptr::mut_null(), ptr::mut_null(), TRUE,
-                                                 0, envp, dirp, &mut si, &mut pi);
-                    if created == FALSE {
-                        create_err = Some(os::last_os_error());
-                    }
-                })
-            })
-        });
-
-        CloseHandle(si.hStdInput);
-        CloseHandle(si.hStdOutput);
-        CloseHandle(si.hStdError);
-
-        for msg in create_err.iter() {
-            fail!("failure in CreateProcess: {}", *msg);
-        }
-
-        // We close the thread handle because we don't care about keeping the
-        // thread id valid, and we aren't keeping the thread handle around to be
-        // able to close it later. We don't close the process handle however
-        // because we want the process id to stay valid at least until the
-        // calling code closes the process handle.
-        CloseHandle(pi.hThread);
-
-        SpawnProcessResult {
-            pid: pi.dwProcessId as pid_t,
-            handle: pi.hProcess as *()
-        }
-    }
-}
-
-#[cfg(windows)]
-fn zeroed_startupinfo() -> libc::types::os::arch::extra::STARTUPINFO {
-    libc::types::os::arch::extra::STARTUPINFO {
-        cb: 0,
-        lpReserved: ptr::mut_null(),
-        lpDesktop: ptr::mut_null(),
-        lpTitle: ptr::mut_null(),
-        dwX: 0,
-        dwY: 0,
-        dwXSize: 0,
-        dwYSize: 0,
-        dwXCountChars: 0,
-        dwYCountCharts: 0,
-        dwFillAttribute: 0,
-        dwFlags: 0,
-        wShowWindow: 0,
-        cbReserved2: 0,
-        lpReserved2: ptr::mut_null(),
-        hStdInput: ptr::mut_null(),
-        hStdOutput: ptr::mut_null(),
-        hStdError: ptr::mut_null()
-    }
-}
-
-#[cfg(windows)]
-fn zeroed_process_information() -> libc::types::os::arch::extra::PROCESS_INFORMATION {
-    libc::types::os::arch::extra::PROCESS_INFORMATION {
-        hProcess: ptr::mut_null(),
-        hThread: ptr::mut_null(),
-        dwProcessId: 0,
-        dwThreadId: 0
-    }
-}
-
-// FIXME: this is only pub so it can be tested (see issue #4536)
-#[cfg(windows)]
-pub fn make_command_line(prog: &str, args: &[~str]) -> ~str {
-    let mut cmd = ~"";
-    append_arg(&mut cmd, prog);
-    for arg in args.iter() {
-        cmd.push_char(' ');
-        append_arg(&mut cmd, *arg);
-    }
-    return cmd;
-
-    fn append_arg(cmd: &mut ~str, arg: &str) {
-        let quote = arg.chars().any(|c| c == ' ' || c == '\t');
-        if quote {
-            cmd.push_char('"');
-        }
-        for i in range(0u, arg.len()) {
-            append_char_at(cmd, arg, i);
-        }
-        if quote {
-            cmd.push_char('"');
-        }
-    }
-
-    fn append_char_at(cmd: &mut ~str, arg: &str, i: uint) {
-        match arg[i] as char {
-            '"' => {
-                // Escape quotes.
-                cmd.push_str("\\\"");
-            }
-            '\\' => {
-                if backslash_run_ends_in_quote(arg, i) {
-                    // Double all backslashes that are in runs before quotes.
-                    cmd.push_str("\\\\");
-                } else {
-                    // Pass other backslashes through unescaped.
-                    cmd.push_char('\\');
-                }
-            }
-            c => {
-                cmd.push_char(c);
-            }
-        }
-    }
-
-    fn backslash_run_ends_in_quote(s: &str, mut i: uint) -> bool {
-        while i < s.len() && s[i] as char == '\\' {
-            i += 1;
-        }
-        return i < s.len() && s[i] as char == '"';
-    }
-}
-
-#[cfg(unix)]
-fn spawn_process_os(prog: &str, args: &[~str],
-                    env: Option<~[(~str, ~str)]>,
-                    dir: Option<&Path>,
-                    in_fd: c_int, out_fd: c_int, err_fd: c_int) -> SpawnProcessResult {
-    use libc::funcs::posix88::unistd::{fork, dup2, close, chdir, execvp};
-    use libc::funcs::bsd44::getdtablesize;
-
-    mod rustrt {
-        extern {
-            pub fn rust_unset_sigprocmask();
-        }
-    }
-
-    #[cfg(windows)]
-    unsafe fn set_environ(_envp: *c_void) {}
-    #[cfg(target_os = "macos")]
-    unsafe fn set_environ(envp: *c_void) {
-        extern { fn _NSGetEnviron() -> *mut *c_void; }
-
-        *_NSGetEnviron() = envp;
-    }
-    #[cfg(not(target_os = "macos"), not(windows))]
-    unsafe fn set_environ(envp: *c_void) {
-        extern {
-            static mut environ: *c_void;
-        }
-        environ = envp;
-    }
-
-    unsafe {
-
-        let pid = fork();
-        if pid < 0 {
-            fail!("failure in fork: {}", os::last_os_error());
-        } else if pid > 0 {
-            return SpawnProcessResult {pid: pid, handle: ptr::null()};
-        }
-
-        rustrt::rust_unset_sigprocmask();
-
-        if dup2(in_fd, 0) == -1 {
-            fail!("failure in dup2(in_fd, 0): {}", os::last_os_error());
-        }
-        if dup2(out_fd, 1) == -1 {
-            fail!("failure in dup2(out_fd, 1): {}", os::last_os_error());
-        }
-        if dup2(err_fd, 2) == -1 {
-            fail!("failure in dup3(err_fd, 2): {}", os::last_os_error());
-        }
-        // close all other fds
-        for fd in range(3, getdtablesize()).invert() {
-            close(fd as c_int);
-        }
-
-        with_dirp(dir, |dirp| {
-            if !dirp.is_null() && chdir(dirp) == -1 {
-                fail!("failure in chdir: {}", os::last_os_error());
-            }
-        });
-
-        with_envp(env, |envp| {
-            if !envp.is_null() {
-                set_environ(envp);
-            }
-            with_argv(prog, args, |argv| {
-                execvp(*argv, argv);
-                // execvp only returns if an error occurred
-                fail!("failure in execvp: {}", os::last_os_error());
-            })
-        })
-    }
-}
-
-#[cfg(unix)]
-fn with_argv<T>(prog: &str, args: &[~str], cb: |**libc::c_char| -> T) -> T {
-    use vec;
-
-    // We can't directly convert `str`s into `*char`s, as someone needs to hold
-    // a reference to the intermediary byte buffers. So first build an array to
-    // hold all the ~[u8] byte strings.
-    let mut tmps = vec::with_capacity(args.len() + 1);
-
-    tmps.push(prog.to_c_str());
-
-    for arg in args.iter() {
-        tmps.push(arg.to_c_str());
-    }
-
-    // Next, convert each of the byte strings into a pointer. This is
-    // technically unsafe as the caller could leak these pointers out of our
-    // scope.
-    let mut ptrs = tmps.map(|tmp| tmp.with_ref(|buf| buf));
-
-    // Finally, make sure we add a null pointer.
-    ptrs.push(ptr::null());
-
-    cb(ptrs.as_ptr())
-}
-
-#[cfg(unix)]
-fn with_envp<T>(env: Option<~[(~str, ~str)]>, cb: |*c_void| -> T) -> T {
-    use vec;
-
-    // On posixy systems we can pass a char** for envp, which is a
-    // null-terminated array of "k=v\n" strings. Like `with_argv`, we have to
-    // have a temporary buffer to hold the intermediary `~[u8]` byte strings.
-    match env {
-        Some(env) => {
-            let mut tmps = vec::with_capacity(env.len());
-
-            for pair in env.iter() {
-                let kv = format!("{}={}", pair.first(), pair.second());
-                tmps.push(kv.to_c_str());
-            }
-
-            // Once again, this is unsafe.
-            let mut ptrs = tmps.map(|tmp| tmp.with_ref(|buf| buf));
-            ptrs.push(ptr::null());
-
-            cb(ptrs.as_ptr() as *c_void)
-        }
-        _ => cb(ptr::null())
-    }
-}
-
-#[cfg(windows)]
-fn with_envp<T>(env: Option<~[(~str, ~str)]>, cb: |*mut c_void| -> T) -> T {
-    // On win32 we pass an "environment block" which is not a char**, but
-    // rather a concatenation of null-terminated k=v\0 sequences, with a final
-    // \0 to terminate.
-    match env {
-        Some(env) => {
-            let mut blk = ~[];
-
-            for pair in env.iter() {
-                let kv = format!("{}={}", pair.first(), pair.second());
-                blk.push_all(kv.as_bytes());
-                blk.push(0);
-            }
-
-            blk.push(0);
-
-            cb(blk.as_mut_ptr() as *mut c_void)
-        }
-        _ => cb(ptr::mut_null())
-    }
-}
-
-fn with_dirp<T>(d: Option<&Path>, cb: |*libc::c_char| -> T) -> T {
-    match d {
-      Some(dir) => dir.with_c_str(|buf| cb(buf)),
-      None => cb(ptr::null())
-    }
-}
-
-#[cfg(windows)]
-fn free_handle(handle: *()) {
-    unsafe {
-        libc::funcs::extra::kernel32::CloseHandle(cast::transmute(handle));
-    }
-}
-
-#[cfg(unix)]
-fn free_handle(_handle: *()) {
-    // unix has no process handle object, just a pid
-}
-
-/**
- * Waits for a process to exit and returns the exit code, failing
- * if there is no process with the specified id.
- *
- * Note that this is private to avoid race conditions on unix where if
- * a user calls waitpid(some_process.get_id()) then some_process.finish()
- * and some_process.destroy() and some_process.finalize() will then either
- * operate on a none-existent process or, even worse, on a newer process
- * with the same id.
- */
-fn waitpid(pid: pid_t) -> int {
-    return waitpid_os(pid);
-
-    #[cfg(windows)]
-    fn waitpid_os(pid: pid_t) -> int {
-        use libc::types::os::arch::extra::DWORD;
-        use libc::consts::os::extra::{
-            SYNCHRONIZE,
-            PROCESS_QUERY_INFORMATION,
-            FALSE,
-            STILL_ACTIVE,
-            INFINITE,
-            WAIT_FAILED
-        };
-        use libc::funcs::extra::kernel32::{
-            OpenProcess,
-            GetExitCodeProcess,
-            CloseHandle,
-            WaitForSingleObject
-        };
-
-        unsafe {
-
-            let process = OpenProcess(SYNCHRONIZE | PROCESS_QUERY_INFORMATION,
-                                      FALSE,
-                                      pid as DWORD);
-            if process.is_null() {
-                fail!("failure in OpenProcess: {}", os::last_os_error());
-            }
-
-            loop {
-                let mut status = 0;
-                if GetExitCodeProcess(process, &mut status) == FALSE {
-                    CloseHandle(process);
-                    fail!("failure in GetExitCodeProcess: {}", os::last_os_error());
-                }
-                if status != STILL_ACTIVE {
-                    CloseHandle(process);
-                    return status as int;
-                }
-                if WaitForSingleObject(process, INFINITE) == WAIT_FAILED {
-                    CloseHandle(process);
-                    fail!("failure in WaitForSingleObject: {}", os::last_os_error());
-                }
-            }
-        }
-    }
-
-    #[cfg(unix)]
-    fn waitpid_os(pid: pid_t) -> int {
-        use libc::funcs::posix01::wait::*;
-
-        #[cfg(target_os = "linux")]
-        #[cfg(target_os = "android")]
-        fn WIFEXITED(status: i32) -> bool {
-            (status & 0xffi32) == 0i32
-        }
-
-        #[cfg(target_os = "macos")]
-        #[cfg(target_os = "freebsd")]
-        fn WIFEXITED(status: i32) -> bool {
-            (status & 0x7fi32) == 0i32
-        }
-
-        #[cfg(target_os = "linux")]
-        #[cfg(target_os = "android")]
-        fn WEXITSTATUS(status: i32) -> i32 {
-            (status >> 8i32) & 0xffi32
-        }
-
-        #[cfg(target_os = "macos")]
-        #[cfg(target_os = "freebsd")]
-        fn WEXITSTATUS(status: i32) -> i32 {
-            status >> 8i32
-        }
-
-        let mut status = 0 as c_int;
-        if unsafe { waitpid(pid, &mut status, 0) } == -1 {
-            fail!("failure in waitpid: {}", os::last_os_error());
-        }
-
-        return if WIFEXITED(status) {
-            WEXITSTATUS(status) as int
-        } else {
-            1
-        };
-    }
-}
-
-#[cfg(test)]
-mod tests {
-
-    #[test] #[cfg(windows)]
-    fn test_make_command_line() {
-        use super::make_command_line;
-        assert_eq!(
-            make_command_line("prog", [~"aaa", ~"bbb", ~"ccc"]),
-            ~"prog aaa bbb ccc"
-        );
-        assert_eq!(
-            make_command_line("C:\\Program Files\\blah\\blah.exe", [~"aaa"]),
-            ~"\"C:\\Program Files\\blah\\blah.exe\" aaa"
-        );
-        assert_eq!(
-            make_command_line("C:\\Program Files\\test", [~"aa\"bb"]),
-            ~"\"C:\\Program Files\\test\" aa\\\"bb"
-        );
-        assert_eq!(
-            make_command_line("echo", [~"a b c"]),
-            ~"echo \"a b c\""
-        );
-    }
-
-    // Currently most of the tests of this functionality live inside std::run,
-    // but they may move here eventually as a non-blocking backend is added to
-    // std::run
-}
diff --git a/src/libstd/io/net/addrinfo.rs b/src/libstd/io/net/addrinfo.rs
index 7df4fdd2266..6d968de209c 100644
--- a/src/libstd/io/net/addrinfo.rs
+++ b/src/libstd/io/net/addrinfo.rs
@@ -18,8 +18,6 @@ getaddrinfo()
 */
 
 use option::{Option, Some, None};
-use result::{Ok, Err};
-use io::{io_error};
 use io::net::ip::{SocketAddr, IpAddr};
 use rt::rtio::{IoFactory, LocalIo};
 use vec::ImmutableVector;
@@ -97,14 +95,7 @@ pub fn get_host_addresses(host: &str) -> Option<~[IpAddr]> {
 ///      consumption just yet.
 fn lookup(hostname: Option<&str>, servname: Option<&str>, hint: Option<Hint>)
           -> Option<~[Info]> {
-    let mut io = LocalIo::borrow();
-    match io.get().get_host_addresses(hostname, servname, hint) {
-        Ok(i) => Some(i),
-        Err(ioerr) => {
-            io_error::cond.raise(ioerr);
-            None
-        }
-    }
+    LocalIo::maybe_raise(|io| io.get_host_addresses(hostname, servname, hint))
 }
 
 #[cfg(test)]
diff --git a/src/libstd/io/net/tcp.rs b/src/libstd/io/net/tcp.rs
index a6230ede7e3..e7787692dd2 100644
--- a/src/libstd/io/net/tcp.rs
+++ b/src/libstd/io/net/tcp.rs
@@ -26,17 +26,9 @@ impl TcpStream {
     }
 
     pub fn connect(addr: SocketAddr) -> Option<TcpStream> {
-        let result = {
-            let mut io = LocalIo::borrow();
-            io.get().tcp_connect(addr)
-        };
-        match result {
-            Ok(s) => Some(TcpStream::new(s)),
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        LocalIo::maybe_raise(|io| {
+            io.tcp_connect(addr).map(TcpStream::new)
+        })
     }
 
     pub fn peer_name(&mut self) -> Option<SocketAddr> {
@@ -94,14 +86,9 @@ pub struct TcpListener {
 
 impl TcpListener {
     pub fn bind(addr: SocketAddr) -> Option<TcpListener> {
-        let mut io = LocalIo::borrow();
-        match io.get().tcp_bind(addr) {
-            Ok(l) => Some(TcpListener { obj: l }),
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        LocalIo::maybe_raise(|io| {
+            io.tcp_bind(addr).map(|l| TcpListener { obj: l })
+        })
     }
 
     pub fn socket_name(&mut self) -> Option<SocketAddr> {
@@ -147,513 +134,473 @@ impl Acceptor<TcpStream> for TcpAcceptor {
 #[cfg(test)]
 mod test {
     use super::*;
-    use rt::test::*;
     use io::net::ip::{Ipv4Addr, SocketAddr};
     use io::*;
+    use io::test::{next_test_ip4, next_test_ip6};
     use prelude::*;
 
     #[test] #[ignore]
     fn bind_error() {
-        do run_in_mt_newsched_task {
-            let mut called = false;
-            io_error::cond.trap(|e| {
-                assert!(e.kind == PermissionDenied);
-                called = true;
-            }).inside(|| {
-                let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
-                let listener = TcpListener::bind(addr);
-                assert!(listener.is_none());
-            });
-            assert!(called);
-        }
+        let mut called = false;
+        io_error::cond.trap(|e| {
+            assert!(e.kind == PermissionDenied);
+            called = true;
+        }).inside(|| {
+            let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
+            let listener = TcpListener::bind(addr);
+            assert!(listener.is_none());
+        });
+        assert!(called);
     }
 
     #[test]
     fn connect_error() {
-        do run_in_mt_newsched_task {
-            let mut called = false;
-            io_error::cond.trap(|e| {
-                let expected_error = if cfg!(unix) {
-                    ConnectionRefused
-                } else {
-                    // On Win32, opening port 1 gives WSAEADDRNOTAVAIL error.
-                    OtherIoError
-                };
-                assert_eq!(e.kind, expected_error);
-                called = true;
-            }).inside(|| {
-                let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
-                let stream = TcpStream::connect(addr);
-                assert!(stream.is_none());
-            });
-            assert!(called);
-        }
+        let mut called = false;
+        io_error::cond.trap(|e| {
+            let expected_error = if cfg!(unix) {
+                ConnectionRefused
+            } else {
+                // On Win32, opening port 1 gives WSAEADDRNOTAVAIL error.
+                OtherIoError
+            };
+            assert_eq!(e.kind, expected_error);
+            called = true;
+        }).inside(|| {
+            let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
+            let stream = TcpStream::connect(addr);
+            assert!(stream.is_none());
+        });
+        assert!(called);
     }
 
     #[test]
     fn smoke_test_ip4() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip4();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let mut buf = [0];
-                stream.read(buf);
-                assert!(buf[0] == 99);
-            }
+        let addr = next_test_ip4();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let mut stream = TcpStream::connect(addr);
             stream.write([99]);
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let mut buf = [0];
+        stream.read(buf);
+        assert!(buf[0] == 99);
     }
 
     #[test]
     fn smoke_test_ip6() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip6();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let mut buf = [0];
-                stream.read(buf);
-                assert!(buf[0] == 99);
-            }
+        let addr = next_test_ip6();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let mut stream = TcpStream::connect(addr);
             stream.write([99]);
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let mut buf = [0];
+        stream.read(buf);
+        assert!(buf[0] == 99);
     }
 
     #[test]
     fn read_eof_ip4() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip4();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let mut buf = [0];
-                let nread = stream.read(buf);
-                assert!(nread.is_none());
-            }
+        let addr = next_test_ip4();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let _stream = TcpStream::connect(addr);
             // Close
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let mut buf = [0];
+        let nread = stream.read(buf);
+        assert!(nread.is_none());
     }
 
     #[test]
     fn read_eof_ip6() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip6();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let mut buf = [0];
-                let nread = stream.read(buf);
-                assert!(nread.is_none());
-            }
+        let addr = next_test_ip6();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let _stream = TcpStream::connect(addr);
             // Close
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let mut buf = [0];
+        let nread = stream.read(buf);
+        assert!(nread.is_none());
     }
 
     #[test]
     fn read_eof_twice_ip4() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip4();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let mut buf = [0];
-                let nread = stream.read(buf);
-                assert!(nread.is_none());
-                io_error::cond.trap(|e| {
-                    if cfg!(windows) {
-                        assert_eq!(e.kind, NotConnected);
-                    } else {
-                        fail!();
-                    }
-                }).inside(|| {
-                    let nread = stream.read(buf);
-                    assert!(nread.is_none());
-                })
-            }
+        let addr = next_test_ip4();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let _stream = TcpStream::connect(addr);
             // Close
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let mut buf = [0];
+        let nread = stream.read(buf);
+        assert!(nread.is_none());
+        io_error::cond.trap(|e| {
+            if cfg!(windows) {
+                assert_eq!(e.kind, NotConnected);
+            } else {
+                fail!();
+            }
+        }).inside(|| {
+            let nread = stream.read(buf);
+            assert!(nread.is_none());
+        })
     }
 
     #[test]
     fn read_eof_twice_ip6() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip6();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let mut buf = [0];
-                let nread = stream.read(buf);
-                assert!(nread.is_none());
-                io_error::cond.trap(|e| {
-                    if cfg!(windows) {
-                        assert_eq!(e.kind, NotConnected);
-                    } else {
-                        fail!();
-                    }
-                }).inside(|| {
-                    let nread = stream.read(buf);
-                    assert!(nread.is_none());
-                })
-            }
+        let addr = next_test_ip6();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let _stream = TcpStream::connect(addr);
             // Close
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let mut buf = [0];
+        let nread = stream.read(buf);
+        assert!(nread.is_none());
+        io_error::cond.trap(|e| {
+            if cfg!(windows) {
+                assert_eq!(e.kind, NotConnected);
+            } else {
+                fail!();
+            }
+        }).inside(|| {
+            let nread = stream.read(buf);
+            assert!(nread.is_none());
+        })
     }
 
     #[test]
     fn write_close_ip4() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip4();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let buf = [0];
-                loop {
-                    let mut stop = false;
-                    io_error::cond.trap(|e| {
-                        // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
-                        //     on windows
-                        assert!(e.kind == ConnectionReset ||
-                                e.kind == BrokenPipe ||
-                                e.kind == ConnectionAborted,
-                                "unknown error: {:?}", e);
-                        stop = true;
-                    }).inside(|| {
-                        stream.write(buf);
-                    });
-                    if stop { break }
-                }
-            }
+        let addr = next_test_ip4();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let _stream = TcpStream::connect(addr);
             // Close
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let buf = [0];
+        loop {
+            let mut stop = false;
+            io_error::cond.trap(|e| {
+                // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
+                //     on windows
+                assert!(e.kind == ConnectionReset ||
+                        e.kind == BrokenPipe ||
+                        e.kind == ConnectionAborted,
+                        "unknown error: {:?}", e);
+                stop = true;
+            }).inside(|| {
+                stream.write(buf);
+            });
+            if stop { break }
+        }
     }
 
     #[test]
     fn write_close_ip6() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip6();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                let mut stream = acceptor.accept();
-                let buf = [0];
-                loop {
-                    let mut stop = false;
-                    io_error::cond.trap(|e| {
-                        // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
-                        //     on windows
-                        assert!(e.kind == ConnectionReset ||
-                                e.kind == BrokenPipe ||
-                                e.kind == ConnectionAborted,
-                                "unknown error: {:?}", e);
-                        stop = true;
-                    }).inside(|| {
-                        stream.write(buf);
-                    });
-                    if stop { break }
-                }
-            }
+        let addr = next_test_ip6();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             let _stream = TcpStream::connect(addr);
             // Close
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        let mut stream = acceptor.accept();
+        let buf = [0];
+        loop {
+            let mut stop = false;
+            io_error::cond.trap(|e| {
+                // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED
+                //     on windows
+                assert!(e.kind == ConnectionReset ||
+                        e.kind == BrokenPipe ||
+                        e.kind == ConnectionAborted,
+                        "unknown error: {:?}", e);
+                stop = true;
+            }).inside(|| {
+                stream.write(buf);
+            });
+            if stop { break }
+        }
     }
 
     #[test]
     fn multiple_connect_serial_ip4() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip4();
-            let max = 10;
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                for ref mut stream in acceptor.incoming().take(max) {
-                    let mut buf = [0];
-                    stream.read(buf);
-                    assert_eq!(buf[0], 99);
-                }
-            }
+        let addr = next_test_ip4();
+        let max = 10;
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             max.times(|| {
                 let mut stream = TcpStream::connect(addr);
                 stream.write([99]);
             });
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        for ref mut stream in acceptor.incoming().take(max) {
+            let mut buf = [0];
+            stream.read(buf);
+            assert_eq!(buf[0], 99);
+        }
     }
 
     #[test]
     fn multiple_connect_serial_ip6() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip6();
-            let max = 10;
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                for ref mut stream in acceptor.incoming().take(max) {
-                    let mut buf = [0];
-                    stream.read(buf);
-                    assert_eq!(buf[0], 99);
-                }
-            }
+        let addr = next_test_ip6();
+        let max = 10;
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             max.times(|| {
                 let mut stream = TcpStream::connect(addr);
                 stream.write([99]);
             });
         }
+
+        let mut acceptor = TcpListener::bind(addr).listen();
+        chan.send(());
+        for ref mut stream in acceptor.incoming().take(max) {
+            let mut buf = [0];
+            stream.read(buf);
+            assert_eq!(buf[0], 99);
+        }
     }
 
     #[test]
     fn multiple_connect_interleaved_greedy_schedule_ip4() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip4();
-            static MAX: int = 10;
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
-                    // Start another task to handle the connection
-                    do spawntask {
-                        let mut stream = stream;
-                        let mut buf = [0];
-                        stream.read(buf);
-                        assert!(buf[0] == i as u8);
-                        debug!("read");
-                    }
+        let addr = next_test_ip4();
+        static MAX: int = 10;
+        let (port, chan) = Chan::new();
+
+        do spawn {
+            let mut acceptor = TcpListener::bind(addr).listen();
+            chan.send(());
+            for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
+                // Start another task to handle the connection
+                do spawn {
+                    let mut stream = stream;
+                    let mut buf = [0];
+                    stream.read(buf);
+                    assert!(buf[0] == i as u8);
+                    debug!("read");
                 }
             }
+        }
 
-            port.recv();
-            connect(0, addr);
-
-            fn connect(i: int, addr: SocketAddr) {
-                if i == MAX { return }
-
-                do spawntask {
-                    debug!("connecting");
-                    let mut stream = TcpStream::connect(addr);
-                    // Connect again before writing
-                    connect(i + 1, addr);
-                    debug!("writing");
-                    stream.write([i as u8]);
-                }
+        port.recv();
+        connect(0, addr);
+
+        fn connect(i: int, addr: SocketAddr) {
+            if i == MAX { return }
+
+            do spawn {
+                debug!("connecting");
+                let mut stream = TcpStream::connect(addr);
+                // Connect again before writing
+                connect(i + 1, addr);
+                debug!("writing");
+                stream.write([i as u8]);
             }
         }
     }
 
     #[test]
     fn multiple_connect_interleaved_greedy_schedule_ip6() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip6();
-            static MAX: int = 10;
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
-                    // Start another task to handle the connection
-                    do spawntask {
-                        let mut stream = stream;
-                        let mut buf = [0];
-                        stream.read(buf);
-                        assert!(buf[0] == i as u8);
-                        debug!("read");
-                    }
+        let addr = next_test_ip6();
+        static MAX: int = 10;
+        let (port, chan) = Chan::<()>::new();
+
+        do spawn {
+            let mut acceptor = TcpListener::bind(addr).listen();
+            chan.send(());
+            for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) {
+                // Start another task to handle the connection
+                do spawn {
+                    let mut stream = stream;
+                    let mut buf = [0];
+                    stream.read(buf);
+                    assert!(buf[0] == i as u8);
+                    debug!("read");
                 }
             }
+        }
 
-            port.recv();
-            connect(0, addr);
-
-            fn connect(i: int, addr: SocketAddr) {
-                if i == MAX { return }
-
-                do spawntask {
-                    debug!("connecting");
-                    let mut stream = TcpStream::connect(addr);
-                    // Connect again before writing
-                    connect(i + 1, addr);
-                    debug!("writing");
-                    stream.write([i as u8]);
-                }
+        port.recv();
+        connect(0, addr);
+
+        fn connect(i: int, addr: SocketAddr) {
+            if i == MAX { return }
+
+            do spawn {
+                debug!("connecting");
+                let mut stream = TcpStream::connect(addr);
+                // Connect again before writing
+                connect(i + 1, addr);
+                debug!("writing");
+                stream.write([i as u8]);
             }
         }
     }
 
     #[test]
     fn multiple_connect_interleaved_lazy_schedule_ip4() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip4();
-            static MAX: int = 10;
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                for stream in acceptor.incoming().take(MAX as uint) {
-                    // Start another task to handle the connection
-                    do spawntask_later {
-                        let mut stream = stream;
-                        let mut buf = [0];
-                        stream.read(buf);
-                        assert!(buf[0] == 99);
-                        debug!("read");
-                    }
+        let addr = next_test_ip4();
+        static MAX: int = 10;
+        let (port, chan) = Chan::new();
+
+        do spawn {
+            let mut acceptor = TcpListener::bind(addr).listen();
+            chan.send(());
+            for stream in acceptor.incoming().take(MAX as uint) {
+                // Start another task to handle the connection
+                do spawn {
+                    let mut stream = stream;
+                    let mut buf = [0];
+                    stream.read(buf);
+                    assert!(buf[0] == 99);
+                    debug!("read");
                 }
             }
+        }
 
-            port.recv();
-            connect(0, addr);
-
-            fn connect(i: int, addr: SocketAddr) {
-                if i == MAX { return }
-
-                do spawntask_later {
-                    debug!("connecting");
-                    let mut stream = TcpStream::connect(addr);
-                    // Connect again before writing
-                    connect(i + 1, addr);
-                    debug!("writing");
-                    stream.write([99]);
-                }
+        port.recv();
+        connect(0, addr);
+
+        fn connect(i: int, addr: SocketAddr) {
+            if i == MAX { return }
+
+            do spawn {
+                debug!("connecting");
+                let mut stream = TcpStream::connect(addr);
+                // Connect again before writing
+                connect(i + 1, addr);
+                debug!("writing");
+                stream.write([99]);
             }
         }
     }
     #[test]
     fn multiple_connect_interleaved_lazy_schedule_ip6() {
-        do run_in_mt_newsched_task {
-            let addr = next_test_ip6();
-            static MAX: int = 10;
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
-                for stream in acceptor.incoming().take(MAX as uint) {
-                    // Start another task to handle the connection
-                    do spawntask_later {
-                        let mut stream = stream;
-                        let mut buf = [0];
-                        stream.read(buf);
-                        assert!(buf[0] == 99);
-                        debug!("read");
-                    }
+        let addr = next_test_ip6();
+        static MAX: int = 10;
+        let (port, chan) = Chan::new();
+
+        do spawn {
+            let mut acceptor = TcpListener::bind(addr).listen();
+            chan.send(());
+            for stream in acceptor.incoming().take(MAX as uint) {
+                // Start another task to handle the connection
+                do spawn {
+                    let mut stream = stream;
+                    let mut buf = [0];
+                    stream.read(buf);
+                    assert!(buf[0] == 99);
+                    debug!("read");
                 }
             }
+        }
 
-            port.recv();
-            connect(0, addr);
-
-            fn connect(i: int, addr: SocketAddr) {
-                if i == MAX { return }
-
-                do spawntask_later {
-                    debug!("connecting");
-                    let mut stream = TcpStream::connect(addr);
-                    // Connect again before writing
-                    connect(i + 1, addr);
-                    debug!("writing");
-                    stream.write([99]);
-                }
+        port.recv();
+        connect(0, addr);
+
+        fn connect(i: int, addr: SocketAddr) {
+            if i == MAX { return }
+
+            do spawn {
+                debug!("connecting");
+                let mut stream = TcpStream::connect(addr);
+                // Connect again before writing
+                connect(i + 1, addr);
+                debug!("writing");
+                stream.write([99]);
             }
         }
     }
 
     #[cfg(test)]
     fn socket_name(addr: SocketAddr) {
-        do run_in_mt_newsched_task {
-            do spawntask {
-                let mut listener = TcpListener::bind(addr).unwrap();
-
-                // Make sure socket_name gives
-                // us the socket we binded to.
-                let so_name = listener.socket_name();
-                assert!(so_name.is_some());
-                assert_eq!(addr, so_name.unwrap());
+        let mut listener = TcpListener::bind(addr).unwrap();
 
-            }
-        }
+        // Make sure socket_name gives
+        // us the socket we binded to.
+        let so_name = listener.socket_name();
+        assert!(so_name.is_some());
+        assert_eq!(addr, so_name.unwrap());
     }
 
     #[cfg(test)]
     fn peer_name(addr: SocketAddr) {
-        do run_in_mt_newsched_task {
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = TcpListener::bind(addr).listen();
-                chan.send(());
+        let (port, chan) = Chan::new();
 
-                acceptor.accept();
-            }
+        do spawn {
+            let mut acceptor = TcpListener::bind(addr).listen();
+            chan.send(());
+            acceptor.accept();
+        }
 
-            port.recv();
-            let stream = TcpStream::connect(addr);
+        port.recv();
+        let stream = TcpStream::connect(addr);
 
-            assert!(stream.is_some());
-            let mut stream = stream.unwrap();
+        assert!(stream.is_some());
+        let mut stream = stream.unwrap();
 
-            // Make sure peer_name gives us the
-            // address/port of the peer we've
-            // connected to.
-            let peer_name = stream.peer_name();
-            assert!(peer_name.is_some());
-            assert_eq!(addr, peer_name.unwrap());
-        }
+        // Make sure peer_name gives us the
+        // address/port of the peer we've
+        // connected to.
+        let peer_name = stream.peer_name();
+        assert!(peer_name.is_some());
+        assert_eq!(addr, peer_name.unwrap());
     }
 
     #[test]
@@ -668,5 +615,4 @@ mod test {
         //peer_name(next_test_ip6());
         socket_name(next_test_ip6());
     }
-
 }
diff --git a/src/libstd/io/net/udp.rs b/src/libstd/io/net/udp.rs
index 1e56f964bea..7cb8f741cf3 100644
--- a/src/libstd/io/net/udp.rs
+++ b/src/libstd/io/net/udp.rs
@@ -21,14 +21,9 @@ pub struct UdpSocket {
 
 impl UdpSocket {
     pub fn bind(addr: SocketAddr) -> Option<UdpSocket> {
-        let mut io = LocalIo::borrow();
-        match io.get().udp_bind(addr) {
-            Ok(s) => Some(UdpSocket { obj: s }),
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        LocalIo::maybe_raise(|io| {
+            io.udp_bind(addr).map(|s| UdpSocket { obj: s })
+        })
     }
 
     pub fn recvfrom(&mut self, buf: &mut [u8]) -> Option<(uint, SocketAddr)> {
@@ -104,52 +99,32 @@ impl Writer for UdpStream {
 #[cfg(test)]
 mod test {
     use super::*;
-    use rt::test::*;
     use io::net::ip::{Ipv4Addr, SocketAddr};
     use io::*;
+    use io::test::*;
     use prelude::*;
 
     #[test]  #[ignore]
     fn bind_error() {
-        do run_in_mt_newsched_task {
-            let mut called = false;
-            io_error::cond.trap(|e| {
-                assert!(e.kind == PermissionDenied);
-                called = true;
-            }).inside(|| {
-                let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
-                let socket = UdpSocket::bind(addr);
-                assert!(socket.is_none());
-            });
-            assert!(called);
-        }
+        let mut called = false;
+        io_error::cond.trap(|e| {
+            assert!(e.kind == PermissionDenied);
+            called = true;
+        }).inside(|| {
+            let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 };
+            let socket = UdpSocket::bind(addr);
+            assert!(socket.is_none());
+        });
+        assert!(called);
     }
 
     #[test]
     fn socket_smoke_test_ip4() {
-        do run_in_mt_newsched_task {
-            let server_ip = next_test_ip4();
-            let client_ip = next_test_ip4();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                match UdpSocket::bind(server_ip) {
-                    Some(ref mut server) => {
-                        chan.send(());
-                        let mut buf = [0];
-                        match server.recvfrom(buf) {
-                            Some((nread, src)) => {
-                                assert_eq!(nread, 1);
-                                assert_eq!(buf[0], 99);
-                                assert_eq!(src, client_ip);
-                            }
-                            None => fail!()
-                        }
-                    }
-                    None => fail!()
-                }
-            }
+        let server_ip = next_test_ip4();
+        let client_ip = next_test_ip4();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             match UdpSocket::bind(client_ip) {
                 Some(ref mut client) => {
                     port.recv();
@@ -158,33 +133,31 @@ mod test {
                 None => fail!()
             }
         }
-    }
 
-    #[test]
-    fn socket_smoke_test_ip6() {
-        do run_in_mt_newsched_task {
-            let server_ip = next_test_ip6();
-            let client_ip = next_test_ip6();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                match UdpSocket::bind(server_ip) {
-                    Some(ref mut server) => {
-                        chan.send(());
-                        let mut buf = [0];
-                        match server.recvfrom(buf) {
-                            Some((nread, src)) => {
-                                assert_eq!(nread, 1);
-                                assert_eq!(buf[0], 99);
-                                assert_eq!(src, client_ip);
-                            }
-                            None => fail!()
-                        }
+        match UdpSocket::bind(server_ip) {
+            Some(ref mut server) => {
+                chan.send(());
+                let mut buf = [0];
+                match server.recvfrom(buf) {
+                    Some((nread, src)) => {
+                        assert_eq!(nread, 1);
+                        assert_eq!(buf[0], 99);
+                        assert_eq!(src, client_ip);
                     }
                     None => fail!()
                 }
             }
+            None => fail!()
+        }
+    }
 
+    #[test]
+    fn socket_smoke_test_ip6() {
+        let server_ip = next_test_ip6();
+        let client_ip = next_test_ip6();
+        let (port, chan) = Chan::<()>::new();
+
+        do spawn {
             match UdpSocket::bind(client_ip) {
                 Some(ref mut client) => {
                     port.recv();
@@ -193,34 +166,31 @@ mod test {
                 None => fail!()
             }
         }
-    }
 
-    #[test]
-    fn stream_smoke_test_ip4() {
-        do run_in_mt_newsched_task {
-            let server_ip = next_test_ip4();
-            let client_ip = next_test_ip4();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                match UdpSocket::bind(server_ip) {
-                    Some(server) => {
-                        let server = ~server;
-                        let mut stream = server.connect(client_ip);
-                        chan.send(());
-                        let mut buf = [0];
-                        match stream.read(buf) {
-                            Some(nread) => {
-                                assert_eq!(nread, 1);
-                                assert_eq!(buf[0], 99);
-                            }
-                            None => fail!()
-                        }
+        match UdpSocket::bind(server_ip) {
+            Some(ref mut server) => {
+                chan.send(());
+                let mut buf = [0];
+                match server.recvfrom(buf) {
+                    Some((nread, src)) => {
+                        assert_eq!(nread, 1);
+                        assert_eq!(buf[0], 99);
+                        assert_eq!(src, client_ip);
                     }
                     None => fail!()
                 }
             }
+            None => fail!()
+        }
+    }
+
+    #[test]
+    fn stream_smoke_test_ip4() {
+        let server_ip = next_test_ip4();
+        let client_ip = next_test_ip4();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             match UdpSocket::bind(client_ip) {
                 Some(client) => {
                     let client = ~client;
@@ -231,34 +201,32 @@ mod test {
                 None => fail!()
             }
         }
-    }
 
-    #[test]
-    fn stream_smoke_test_ip6() {
-        do run_in_mt_newsched_task {
-            let server_ip = next_test_ip6();
-            let client_ip = next_test_ip6();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                match UdpSocket::bind(server_ip) {
-                    Some(server) => {
-                        let server = ~server;
-                        let mut stream = server.connect(client_ip);
-                        chan.send(());
-                        let mut buf = [0];
-                        match stream.read(buf) {
-                            Some(nread) => {
-                                assert_eq!(nread, 1);
-                                assert_eq!(buf[0], 99);
-                            }
-                            None => fail!()
-                        }
+        match UdpSocket::bind(server_ip) {
+            Some(server) => {
+                let server = ~server;
+                let mut stream = server.connect(client_ip);
+                chan.send(());
+                let mut buf = [0];
+                match stream.read(buf) {
+                    Some(nread) => {
+                        assert_eq!(nread, 1);
+                        assert_eq!(buf[0], 99);
                     }
                     None => fail!()
                 }
             }
+            None => fail!()
+        }
+    }
+
+    #[test]
+    fn stream_smoke_test_ip6() {
+        let server_ip = next_test_ip6();
+        let client_ip = next_test_ip6();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             match UdpSocket::bind(client_ip) {
                 Some(client) => {
                     let client = ~client;
@@ -269,25 +237,36 @@ mod test {
                 None => fail!()
             }
         }
+
+        match UdpSocket::bind(server_ip) {
+            Some(server) => {
+                let server = ~server;
+                let mut stream = server.connect(client_ip);
+                chan.send(());
+                let mut buf = [0];
+                match stream.read(buf) {
+                    Some(nread) => {
+                        assert_eq!(nread, 1);
+                        assert_eq!(buf[0], 99);
+                    }
+                    None => fail!()
+                }
+            }
+            None => fail!()
+        }
     }
 
-    #[cfg(test)]
     fn socket_name(addr: SocketAddr) {
-        do run_in_mt_newsched_task {
-            do spawntask {
-                let server = UdpSocket::bind(addr);
-
-                assert!(server.is_some());
-                let mut server = server.unwrap();
+        let server = UdpSocket::bind(addr);
 
-                // Make sure socket_name gives
-                // us the socket we binded to.
-                let so_name = server.socket_name();
-                assert!(so_name.is_some());
-                assert_eq!(addr, so_name.unwrap());
+        assert!(server.is_some());
+        let mut server = server.unwrap();
 
-            }
-        }
+        // Make sure socket_name gives
+        // us the socket we bound to.
+        let so_name = server.socket_name();
+        assert!(so_name.is_some());
+        assert_eq!(addr, so_name.unwrap());
     }
 
     #[test]
diff --git a/src/libstd/io/net/unix.rs b/src/libstd/io/net/unix.rs
index 2766aa9ad27..01b409d4316 100644
--- a/src/libstd/io/net/unix.rs
+++ b/src/libstd/io/net/unix.rs
@@ -59,14 +59,9 @@ impl UnixStream {
     ///     stream.write([1, 2, 3]);
     ///
     pub fn connect<P: ToCStr>(path: &P) -> Option<UnixStream> {
-        let mut io = LocalIo::borrow();
-        match io.get().unix_connect(&path.to_c_str()) {
-            Ok(s) => Some(UnixStream::new(s)),
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        LocalIo::maybe_raise(|io| {
+            io.unix_connect(&path.to_c_str()).map(UnixStream::new)
+        })
     }
 }
 
@@ -107,14 +102,9 @@ impl UnixListener {
     ///     }
     ///
     pub fn bind<P: ToCStr>(path: &P) -> Option<UnixListener> {
-        let mut io = LocalIo::borrow();
-        match io.get().unix_bind(&path.to_c_str()) {
-            Ok(s) => Some(UnixListener{ obj: s }),
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        LocalIo::maybe_raise(|io| {
+            io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s })
+        })
     }
 }
 
@@ -150,55 +140,49 @@ impl Acceptor<UnixStream> for UnixAcceptor {
 mod tests {
     use prelude::*;
     use super::*;
-    use rt::test::*;
     use io::*;
+    use io::test::*;
 
     fn smalltest(server: proc(UnixStream), client: proc(UnixStream)) {
-        do run_in_mt_newsched_task {
-            let path1 = next_test_unix();
-            let path2 = path1.clone();
-            let (client, server) = (client, server);
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = UnixListener::bind(&path1).listen();
-                chan.send(());
-                server(acceptor.accept().unwrap());
-            }
+        let path1 = next_test_unix();
+        let path2 = path1.clone();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             client(UnixStream::connect(&path2).unwrap());
         }
+
+        let mut acceptor = UnixListener::bind(&path1).listen();
+        chan.send(());
+        server(acceptor.accept().unwrap());
     }
 
     #[test]
     fn bind_error() {
-        do run_in_mt_newsched_task {
-            let mut called = false;
-            io_error::cond.trap(|e| {
-                assert!(e.kind == PermissionDenied);
-                called = true;
-            }).inside(|| {
-                let listener = UnixListener::bind(&("path/to/nowhere"));
-                assert!(listener.is_none());
-            });
-            assert!(called);
-        }
+        let mut called = false;
+        io_error::cond.trap(|e| {
+            assert!(e.kind == PermissionDenied);
+            called = true;
+        }).inside(|| {
+            let listener = UnixListener::bind(&("path/to/nowhere"));
+            assert!(listener.is_none());
+        });
+        assert!(called);
     }
 
     #[test]
     fn connect_error() {
-        do run_in_mt_newsched_task {
-            let mut called = false;
-            io_error::cond.trap(|e| {
-                assert_eq!(e.kind, FileNotFound);
-                called = true;
-            }).inside(|| {
-                let stream = UnixStream::connect(&("path/to/nowhere"));
-                assert!(stream.is_none());
-            });
-            assert!(called);
-        }
+        let mut called = false;
+        io_error::cond.trap(|e| {
+            assert_eq!(e.kind,
+                       if cfg!(windows) {OtherIoError} else {FileNotFound});
+            called = true;
+        }).inside(|| {
+            let stream = UnixStream::connect(&("path/to/nowhere"));
+            assert!(stream.is_none());
+        });
+        assert!(called);
     }
 
     #[test]
@@ -244,37 +228,33 @@ mod tests {
 
     #[test]
     fn accept_lots() {
-        do run_in_mt_newsched_task {
-            let times = 10;
-            let path1 = next_test_unix();
-            let path2 = path1.clone();
-            let (port, chan) = Chan::new();
-
-            do spawntask {
-                let mut acceptor = UnixListener::bind(&path1).listen();
-                chan.send(());
-                times.times(|| {
-                    let mut client = acceptor.accept();
-                    let mut buf = [0];
-                    client.read(buf);
-                    assert_eq!(buf[0], 100);
-                })
-            }
+        let times = 10;
+        let path1 = next_test_unix();
+        let path2 = path1.clone();
+        let (port, chan) = Chan::new();
 
+        do spawn {
             port.recv();
             times.times(|| {
                 let mut stream = UnixStream::connect(&path2);
                 stream.write([100]);
             })
         }
+
+        let mut acceptor = UnixListener::bind(&path1).listen();
+        chan.send(());
+        times.times(|| {
+            let mut client = acceptor.accept();
+            let mut buf = [0];
+            client.read(buf);
+            assert_eq!(buf[0], 100);
+        })
     }
 
     #[test]
     fn path_exists() {
-        do run_in_mt_newsched_task {
-            let path = next_test_unix();
-            let _acceptor = UnixListener::bind(&path).listen();
-            assert!(path.exists());
-        }
+        let path = next_test_unix();
+        let _acceptor = UnixListener::bind(&path).listen();
+        assert!(path.exists());
     }
 }
diff --git a/src/libstd/io/option.rs b/src/libstd/io/option.rs
index 61c5411f360..a661d6ab7eb 100644
--- a/src/libstd/io/option.rs
+++ b/src/libstd/io/option.rs
@@ -106,53 +106,46 @@ impl<T, A: Acceptor<T>> Acceptor<T> for Option<A> {
 mod test {
     use option::*;
     use super::super::mem::*;
-    use rt::test::*;
     use super::super::{PreviousIoError, io_error};
 
     #[test]
     fn test_option_writer() {
-        do run_in_mt_newsched_task {
-            let mut writer: Option<MemWriter> = Some(MemWriter::new());
-            writer.write([0, 1, 2]);
-            writer.flush();
-            assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]);
-        }
+        let mut writer: Option<MemWriter> = Some(MemWriter::new());
+        writer.write([0, 1, 2]);
+        writer.flush();
+        assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]);
     }
 
     #[test]
     fn test_option_writer_error() {
-        do run_in_mt_newsched_task {
-            let mut writer: Option<MemWriter> = None;
-
-            let mut called = false;
-            io_error::cond.trap(|err| {
-                assert_eq!(err.kind, PreviousIoError);
-                called = true;
-            }).inside(|| {
-                writer.write([0, 0, 0]);
-            });
-            assert!(called);
-
-            let mut called = false;
-            io_error::cond.trap(|err| {
-                assert_eq!(err.kind, PreviousIoError);
-                called = true;
-            }).inside(|| {
-                writer.flush();
-            });
-            assert!(called);
-        }
+        let mut writer: Option<MemWriter> = None;
+
+        let mut called = false;
+        io_error::cond.trap(|err| {
+            assert_eq!(err.kind, PreviousIoError);
+            called = true;
+        }).inside(|| {
+            writer.write([0, 0, 0]);
+        });
+        assert!(called);
+
+        let mut called = false;
+        io_error::cond.trap(|err| {
+            assert_eq!(err.kind, PreviousIoError);
+            called = true;
+        }).inside(|| {
+            writer.flush();
+        });
+        assert!(called);
     }
 
     #[test]
     fn test_option_reader() {
-        do run_in_mt_newsched_task {
-            let mut reader: Option<MemReader> = Some(MemReader::new(~[0, 1, 2, 3]));
-            let mut buf = [0, 0];
-            reader.read(buf);
-            assert_eq!(buf, [0, 1]);
-            assert!(!reader.eof());
-        }
+        let mut reader: Option<MemReader> = Some(MemReader::new(~[0, 1, 2, 3]));
+        let mut buf = [0, 0];
+        reader.read(buf);
+        assert_eq!(buf, [0, 1]);
+        assert!(!reader.eof());
     }
 
     #[test]
diff --git a/src/libstd/io/pipe.rs b/src/libstd/io/pipe.rs
index 252575ee445..2349c64a84b 100644
--- a/src/libstd/io/pipe.rs
+++ b/src/libstd/io/pipe.rs
@@ -14,10 +14,9 @@
 //! enough so that pipes can be created to child processes.
 
 use prelude::*;
-use super::{Reader, Writer};
 use io::{io_error, EndOfFile};
-use io::native::file;
-use rt::rtio::{LocalIo, RtioPipe};
+use libc;
+use rt::rtio::{RtioPipe, LocalIo};
 
 pub struct PipeStream {
     priv obj: ~RtioPipe,
@@ -43,15 +42,10 @@ impl PipeStream {
     ///
     /// If the pipe cannot be created, an error will be raised on the
     /// `io_error` condition.
-    pub fn open(fd: file::fd_t) -> Option<PipeStream> {
-        let mut io = LocalIo::borrow();
-        match io.get().pipe_open(fd) {
-            Ok(obj) => Some(PipeStream { obj: obj }),
-            Err(e) => {
-                io_error::cond.raise(e);
-                None
-            }
-        }
+    pub fn open(fd: libc::c_int) -> Option<PipeStream> {
+        LocalIo::maybe_raise(|io| {
+            io.pipe_open(fd).map(|obj| PipeStream { obj: obj })
+        })
     }
 
     pub fn new(inner: ~RtioPipe) -> PipeStream {
diff --git a/src/libstd/io/process.rs b/src/libstd/io/process.rs
index 001faa1ecaf..bbb2a7ef398 100644
--- a/src/libstd/io/process.rs
+++ b/src/libstd/io/process.rs
@@ -119,19 +119,17 @@ impl Process {
     /// Creates a new pipe initialized, but not bound to any particular
     /// source/destination
     pub fn new(config: ProcessConfig) -> Option<Process> {
-        let mut io = LocalIo::borrow();
-        match io.get().spawn(config) {
-            Ok((p, io)) => Some(Process{
-                handle: p,
-                io: io.move_iter().map(|p|
-                    p.map(|p| io::PipeStream::new(p))
-                ).collect()
-            }),
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        let mut config = Some(config);
+        LocalIo::maybe_raise(|io| {
+            io.spawn(config.take_unwrap()).map(|(p, io)| {
+                Process {
+                    handle: p,
+                    io: io.move_iter().map(|p| {
+                        p.map(|p| io::PipeStream::new(p))
+                    }).collect()
+                }
+            })
+        })
     }
 
     /// Returns the process id of this child process
diff --git a/src/libstd/io/signal.rs b/src/libstd/io/signal.rs
index 00d84e22c25..4cde35796a6 100644
--- a/src/libstd/io/signal.rs
+++ b/src/libstd/io/signal.rs
@@ -23,8 +23,7 @@ use clone::Clone;
 use comm::{Port, SharedChan};
 use container::{Map, MutableMap};
 use hashmap;
-use io::io_error;
-use result::{Err, Ok};
+use option::{Some, None};
 use rt::rtio::{IoFactory, LocalIo, RtioSignal};
 
 #[repr(int)]
@@ -122,16 +121,14 @@ impl Listener {
         if self.handles.contains_key(&signum) {
             return true; // self is already listening to signum, so succeed
         }
-        let mut io = LocalIo::borrow();
-        match io.get().signal(signum, self.chan.clone()) {
-            Ok(w) => {
-                self.handles.insert(signum, w);
+        match LocalIo::maybe_raise(|io| {
+            io.signal(signum, self.chan.clone())
+        }) {
+            Some(handle) => {
+                self.handles.insert(signum, handle);
                 true
-            },
-            Err(ioerr) => {
-                io_error::cond.raise(ioerr);
-                false
             }
+            None => false
         }
     }
 
diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs
index 41337075aa9..1e4fa7968dc 100644
--- a/src/libstd/io/stdio.rs
+++ b/src/libstd/io/stdio.rs
@@ -27,13 +27,13 @@ out.write(bytes!("Hello, world!"));
 */
 
 use fmt;
+use io::buffered::LineBufferedWriter;
+use io::{Reader, Writer, io_error, IoError, OtherIoError,
+         standard_error, EndOfFile};
 use libc;
 use option::{Option, Some, None};
 use result::{Ok, Err};
-use io::buffered::LineBufferedWriter;
 use rt::rtio::{DontClose, IoFactory, LocalIo, RtioFileStream, RtioTTY};
-use super::{Reader, Writer, io_error, IoError, OtherIoError,
-            standard_error, EndOfFile};
 
 // And so begins the tale of acquiring a uv handle to a stdio stream on all
 // platforms in all situations. Our story begins by splitting the world into two
@@ -69,19 +69,12 @@ enum StdSource {
 }
 
 fn src<T>(fd: libc::c_int, readable: bool, f: |StdSource| -> T) -> T {
-    let mut io = LocalIo::borrow();
-    match io.get().tty_open(fd, readable) {
-        Ok(tty) => f(TTY(tty)),
-        Err(_) => {
-            // It's not really that desirable if these handles are closed
-            // synchronously, and because they're squirreled away in a task
-            // structure the destructors will be run when the task is
-            // attempted to get destroyed. This means that if we run a
-            // synchronous destructor we'll attempt to do some scheduling
-            // operations which will just result in sadness.
-            f(File(io.get().fs_from_raw_fd(fd, DontClose)))
-        }
-    }
+    LocalIo::maybe_raise(|io| {
+        Ok(match io.tty_open(fd, readable) {
+            Ok(tty) => f(TTY(tty)),
+            Err(_) => f(File(io.fs_from_raw_fd(fd, DontClose))),
+        })
+    }).unwrap()
 }
 
 /// Creates a new non-blocking handle to the stdin of the current process.
@@ -138,7 +131,17 @@ fn with_task_stdout(f: |&mut Writer|) {
             }
 
             None => {
-                let mut io = stdout();
+                struct Stdout;
+                impl Writer for Stdout {
+                    fn write(&mut self, data: &[u8]) {
+                        unsafe {
+                            libc::write(libc::STDOUT_FILENO,
+                                        data.as_ptr() as *libc::c_void,
+                                        data.len() as libc::size_t);
+                        }
+                    }
+                }
+                let mut io = Stdout;
                 f(&mut io as &mut Writer);
             }
         }
@@ -304,23 +307,10 @@ impl Writer for StdWriter {
 
 #[cfg(test)]
 mod tests {
-    use super::*;
-    use rt::test::run_in_newsched_task;
-
-    #[test]
-    fn smoke_uv() {
+    iotest!(fn smoke() {
         // Just make sure we can acquire handles
         stdin();
         stdout();
         stderr();
-    }
-
-    #[test]
-    fn smoke_native() {
-        do run_in_newsched_task {
-            stdin();
-            stdout();
-            stderr();
-        }
-    }
+    })
 }
diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs
new file mode 100644
index 00000000000..4be11227965
--- /dev/null
+++ b/src/libstd/io/test.rs
@@ -0,0 +1,195 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+#[macro_escape];
+
+use os;
+use prelude::*;
+use rand;
+use rand::Rng;
+use std::io::net::ip::*;
+use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed};
+
+macro_rules! iotest (
+    { fn $name:ident() $b:block } => (
+        mod $name {
+            #[allow(unused_imports)];
+
+            use super::super::*;
+            use super::*;
+            use io;
+            use prelude::*;
+            use io::*;
+            use io::fs::*;
+            use io::net::tcp::*;
+            use io::net::ip::*;
+            use io::net::udp::*;
+            #[cfg(unix)]
+            use io::net::unix::*;
+            use str;
+            use util;
+
+            fn f() $b
+
+            #[test] fn green() { f() }
+            #[test] fn native() {
+                use native;
+                let (p, c) = Chan::new();
+                do native::task::spawn { c.send(f()) }
+                p.recv();
+            }
+        }
+    )
+)
+
+/// Get a port number, starting at 9600, for use in tests
+pub fn next_test_port() -> u16 {
+    static mut next_offset: AtomicUint = INIT_ATOMIC_UINT;
+    unsafe {
+        base_port() + next_offset.fetch_add(1, Relaxed) as u16
+    }
+}
+
+/// Get a temporary path which could be the location of a unix socket
+pub fn next_test_unix() -> Path {
+    if cfg!(unix) {
+        os::tmpdir().join(rand::task_rng().gen_ascii_str(20))
+    } else {
+        Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20))
+    }
+}
+
+/// Get a unique IPv4 localhost:port pair starting at 9600
+pub fn next_test_ip4() -> SocketAddr {
+    SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
+}
+
+/// Get a unique IPv6 localhost:port pair starting at 9600
+pub fn next_test_ip6() -> SocketAddr {
+    SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
+}
+
+/*
+XXX: Welcome to MegaHack City.
+
+The bots run multiple builds at the same time, and these builds
+all want to use ports. This function figures out which workspace
+it is running in and assigns a port range based on it.
+*/
+fn base_port() -> u16 {
+
+    let base = 9600u16;
+    let range = 1000u16;
+
+    let bases = [
+        ("32-opt", base + range * 1),
+        ("32-noopt", base + range * 2),
+        ("64-opt", base + range * 3),
+        ("64-noopt", base + range * 4),
+        ("64-opt-vg", base + range * 5),
+        ("all-opt", base + range * 6),
+        ("snap3", base + range * 7),
+        ("dist", base + range * 8)
+    ];
+
+    // FIXME (#9639): This needs to handle non-utf8 paths
+    let path = os::getcwd();
+    let path_s = path.as_str().unwrap();
+
+    let mut final_base = base;
+
+    for &(dir, base) in bases.iter() {
+        if path_s.contains(dir) {
+            final_base = base;
+            break;
+        }
+    }
+
+    return final_base;
+}
+
+pub fn raise_fd_limit() {
+    unsafe { darwin_fd_limit::raise_fd_limit() }
+}
+
+#[cfg(target_os="macos")]
+#[allow(non_camel_case_types)]
+mod darwin_fd_limit {
+    /*!
+     * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
+     * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
+     * for our multithreaded scheduler testing, depending on the number of cores available.
+     *
+     * This fixes issue #7772.
+     */
+
+    use libc;
+    type rlim_t = libc::uint64_t;
+    struct rlimit {
+        rlim_cur: rlim_t,
+        rlim_max: rlim_t
+    }
+    #[nolink]
+    extern {
+        // name probably doesn't need to be mut, but the C function doesn't specify const
+        fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
+                  oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
+                  newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
+        fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
+        fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
+    }
+    static CTL_KERN: libc::c_int = 1;
+    static KERN_MAXFILESPERPROC: libc::c_int = 29;
+    static RLIMIT_NOFILE: libc::c_int = 8;
+
+    pub unsafe fn raise_fd_limit() {
+        // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
+        // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
+        use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
+        use mem::size_of_val;
+        use os::last_os_error;
+
+        // Fetch the kern.maxfilesperproc value
+        let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
+        let mut maxfiles: libc::c_int = 0;
+        let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
+        if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
+                  to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
+                  to_mut_unsafe_ptr(&mut size),
+                  mut_null(), 0) != 0 {
+            let err = last_os_error();
+            error!("raise_fd_limit: error calling sysctl: {}", err);
+            return;
+        }
+
+        // Fetch the current resource limits
+        let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
+        if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
+            let err = last_os_error();
+            error!("raise_fd_limit: error calling getrlimit: {}", err);
+            return;
+        }
+
+        // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
+        rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
+
+        // Set our newly-increased resource limit
+        if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
+            let err = last_os_error();
+            error!("raise_fd_limit: error calling setrlimit: {}", err);
+            return;
+        }
+    }
+}
+
+#[cfg(not(target_os="macos"))]
+mod darwin_fd_limit {
+    pub unsafe fn raise_fd_limit() {}
+}
diff --git a/src/libstd/io/timer.rs b/src/libstd/io/timer.rs
index 9d4a72509e7..7c9aa28bfe9 100644
--- a/src/libstd/io/timer.rs
+++ b/src/libstd/io/timer.rs
@@ -39,9 +39,7 @@ loop {
 */
 
 use comm::Port;
-use option::{Option, Some, None};
-use result::{Ok, Err};
-use io::io_error;
+use option::Option;
 use rt::rtio::{IoFactory, LocalIo, RtioTimer};
 
 pub struct Timer {
@@ -60,15 +58,7 @@ impl Timer {
     /// for a number of milliseconds, or to possibly create channels which will
     /// get notified after an amount of time has passed.
     pub fn new() -> Option<Timer> {
-        let mut io = LocalIo::borrow();
-        match io.get().timer_init() {
-            Ok(t) => Some(Timer { obj: t }),
-            Err(ioerr) => {
-                debug!("Timer::init: failed to init: {:?}", ioerr);
-                io_error::cond.raise(ioerr);
-                None
-            }
-        }
+        LocalIo::maybe_raise(|io| io.timer_init().map(|t| Timer { obj: t }))
     }
 
     /// Blocks the current task for `msecs` milliseconds.
@@ -108,77 +98,60 @@ impl Timer {
 mod test {
     use prelude::*;
     use super::*;
-    use rt::test::*;
 
     #[test]
     fn test_io_timer_sleep_simple() {
-        do run_in_mt_newsched_task {
-            let mut timer = Timer::new().unwrap();
-            timer.sleep(1);
-        }
+        let mut timer = Timer::new().unwrap();
+        timer.sleep(1);
     }
 
     #[test]
     fn test_io_timer_sleep_oneshot() {
-        do run_in_mt_newsched_task {
-            let mut timer = Timer::new().unwrap();
-            timer.oneshot(1).recv();
-        }
+        let mut timer = Timer::new().unwrap();
+        timer.oneshot(1).recv();
     }
 
     #[test]
     fn test_io_timer_sleep_oneshot_forget() {
-        do run_in_mt_newsched_task {
-            let mut timer = Timer::new().unwrap();
-            timer.oneshot(100000000000);
-        }
+        let mut timer = Timer::new().unwrap();
+        timer.oneshot(100000000000);
     }
 
     #[test]
     fn oneshot_twice() {
-        do run_in_mt_newsched_task {
-            let mut timer = Timer::new().unwrap();
-            let port1 = timer.oneshot(10000);
-            let port = timer.oneshot(1);
-            port.recv();
-            assert_eq!(port1.try_recv(), None);
-        }
+        let mut timer = Timer::new().unwrap();
+        let port1 = timer.oneshot(10000);
+        let port = timer.oneshot(1);
+        port.recv();
+        assert_eq!(port1.try_recv(), None);
     }
 
     #[test]
     fn test_io_timer_oneshot_then_sleep() {
-        do run_in_mt_newsched_task {
-            let mut timer = Timer::new().unwrap();
-            let port = timer.oneshot(100000000000);
-            timer.sleep(1); // this should invalidate the port
+        let mut timer = Timer::new().unwrap();
+        let port = timer.oneshot(100000000000);
+        timer.sleep(1); // this should invalidate the port
 
-            assert_eq!(port.try_recv(), None);
-        }
+        assert_eq!(port.try_recv(), None);
     }
 
     #[test]
     fn test_io_timer_sleep_periodic() {
-        do run_in_mt_newsched_task {
-            let mut timer = Timer::new().unwrap();
-            let port = timer.periodic(1);
-            port.recv();
-            port.recv();
-            port.recv();
-        }
+        let mut timer = Timer::new().unwrap();
+        let port = timer.periodic(1);
+        port.recv();
+        port.recv();
+        port.recv();
     }
 
     #[test]
     fn test_io_timer_sleep_periodic_forget() {
-        do run_in_mt_newsched_task {
-            let mut timer = Timer::new().unwrap();
-            timer.periodic(100000000000);
-        }
+        let mut timer = Timer::new().unwrap();
+        timer.periodic(100000000000);
     }
 
     #[test]
     fn test_io_timer_sleep_standalone() {
-        do run_in_mt_newsched_task {
-            sleep(1)
-        }
+        sleep(1)
     }
 }
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index b2b856c5c83..4f633a63bab 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -65,13 +65,15 @@
 // When testing libstd, bring in libuv as the I/O backend so tests can print
 // things and all of the std::io tests have an I/O interface to run on top
 // of
-#[cfg(test)] extern mod rustuv = "rustuv#0.9-pre";
+#[cfg(test)] extern mod rustuv = "rustuv";
+#[cfg(test)] extern mod native = "native";
+#[cfg(test)] extern mod green = "green";
 
 // Make extra accessible for benchmarking
-#[cfg(test)] extern mod extra = "extra#0.9-pre";
+#[cfg(test)] extern mod extra = "extra";
 
 // Make std testable by not duplicating lang items. See #2912
-#[cfg(test)] extern mod realstd = "std#0.9-pre";
+#[cfg(test)] extern mod realstd = "std";
 #[cfg(test)] pub use kinds = realstd::kinds;
 #[cfg(test)] pub use ops = realstd::ops;
 #[cfg(test)] pub use cmp = realstd::cmp;
@@ -159,6 +161,7 @@ pub mod trie;
 pub mod task;
 pub mod comm;
 pub mod local_data;
+pub mod sync;
 
 
 /* Runtime and platform support */
diff --git a/src/libstd/local_data.rs b/src/libstd/local_data.rs
index 652aa4d8198..d7e11d2f3a7 100644
--- a/src/libstd/local_data.rs
+++ b/src/libstd/local_data.rs
@@ -432,6 +432,7 @@ mod tests {
     }
 
     #[test]
+    #[allow(dead_code)]
     fn test_tls_overwrite_multiple_types() {
         static str_key: Key<~str> = &Key;
         static box_key: Key<@()> = &Key;
diff --git a/src/libstd/logging.rs b/src/libstd/logging.rs
index dbe8b3247c0..fb83cfdd6ea 100644
--- a/src/libstd/logging.rs
+++ b/src/libstd/logging.rs
@@ -118,26 +118,16 @@ pub static ERROR: u32 = 1;
 /// It is not recommended to call this function directly, rather it should be
 /// invoked through the logging family of macros.
 pub fn log(_level: u32, args: &fmt::Arguments) {
-    unsafe {
-        let optional_task: Option<*mut Task> = Local::try_unsafe_borrow();
-        match optional_task {
-            Some(local) => {
-                // Lazily initialize the local task's logger
-                match (*local).logger {
-                    // Use the available logger if we have one
-                    Some(ref mut logger) => { logger.log(args); }
-                    None => {
-                        let mut logger = StdErrLogger::new();
-                        logger.log(args);
-                        (*local).logger = Some(logger);
-                    }
-                }
-            }
-            // If there's no local task, then always log to stderr
-            None => {
-                let mut logger = StdErrLogger::new();
-                logger.log(args);
-            }
-        }
+    let mut logger = {
+        let mut task = Local::borrow(None::<Task>);
+        task.get().logger.take()
+    };
+
+    if logger.is_none() {
+        logger = Some(StdErrLogger::new());
     }
+    logger.get_mut_ref().log(args);
+
+    let mut task = Local::borrow(None::<Task>);
+    task.get().logger = logger;
 }
diff --git a/src/libstd/os.rs b/src/libstd/os.rs
index 8da7c0340f7..8f2f2190885 100644
--- a/src/libstd/os.rs
+++ b/src/libstd/os.rs
@@ -28,8 +28,6 @@
 
 #[allow(missing_doc)];
 
-#[cfg(unix)]
-use c_str::CString;
 use clone::Clone;
 use container::Container;
 #[cfg(target_os = "macos")]
@@ -43,8 +41,7 @@ use ptr;
 use str;
 use to_str;
 use unstable::finally::Finally;
-
-pub use os::consts::*;
+use sync::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
 
 /// Delegates to the libc close() function, returning the same return value.
 pub fn close(fd: c_int) -> c_int {
@@ -58,6 +55,8 @@ static BUF_BYTES : uint = 2048u;
 
 #[cfg(unix)]
 pub fn getcwd() -> Path {
+    use c_str::CString;
+
     let mut buf = [0 as libc::c_char, ..BUF_BYTES];
     unsafe {
         if libc::getcwd(buf.as_mut_ptr(), buf.len() as size_t).is_null() {
@@ -333,7 +332,7 @@ pub fn pipe() -> Pipe {
 
 /// Returns the proper dll filename for the given basename of a file.
 pub fn dll_filename(base: &str) -> ~str {
-    format!("{}{}{}", DLL_PREFIX, base, DLL_SUFFIX)
+    format!("{}{}{}", consts::DLL_PREFIX, base, consts::DLL_SUFFIX)
 }
 
 /// Optionally returns the filesystem path to the current executable which is
@@ -675,17 +674,26 @@ pub fn last_os_error() -> ~str {
     strerror()
 }
 
+static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
+
 /**
  * Sets the process exit code
  *
  * Sets the exit code returned by the process if all supervised tasks
  * terminate successfully (without failing). If the current root task fails
  * and is supervised by the scheduler then any user-specified exit status is
- * ignored and the process exits with the default failure status
+ * ignored and the process exits with the default failure status.
+ *
+ * Note that this is not synchronized against modifications of other threads.
  */
 pub fn set_exit_status(code: int) {
-    use rt;
-    rt::set_exit_status(code);
+    unsafe { EXIT_STATUS.store(code, SeqCst) }
+}
+
+/// Fetches the process's current exit code. This defaults to 0 and can change
+/// by calling `set_exit_status`.
+pub fn get_exit_status() -> int {
+    unsafe { EXIT_STATUS.load(SeqCst) }
 }
 
 #[cfg(target_os = "macos")]
diff --git a/src/libstd/rt/basic.rs b/src/libstd/rt/basic.rs
deleted file mode 100644
index 3589582357c..00000000000
--- a/src/libstd/rt/basic.rs
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! This is a basic event loop implementation not meant for any "real purposes"
-//! other than testing the scheduler and proving that it's possible to have a
-//! pluggable event loop.
-
-use prelude::*;
-
-use cast;
-use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback,
-               Callback};
-use unstable::sync::Exclusive;
-use io::native;
-use util;
-
-/// This is the only exported function from this module.
-pub fn event_loop() -> ~EventLoop {
-    ~BasicLoop::new() as ~EventLoop
-}
-
-struct BasicLoop {
-    work: ~[proc()],                  // pending work
-    idle: Option<*mut BasicPausable>, // only one is allowed
-    remotes: ~[(uint, ~Callback)],
-    next_remote: uint,
-    messages: Exclusive<~[Message]>,
-    io: ~IoFactory,
-}
-
-enum Message { RunRemote(uint), RemoveRemote(uint) }
-
-impl BasicLoop {
-    fn new() -> BasicLoop {
-        BasicLoop {
-            work: ~[],
-            idle: None,
-            next_remote: 0,
-            remotes: ~[],
-            messages: Exclusive::new(~[]),
-            io: ~native::IoFactory as ~IoFactory,
-        }
-    }
-
-    /// Process everything in the work queue (continually)
-    fn work(&mut self) {
-        while self.work.len() > 0 {
-            for work in util::replace(&mut self.work, ~[]).move_iter() {
-                work();
-            }
-        }
-    }
-
-    fn remote_work(&mut self) {
-        let messages = unsafe {
-            self.messages.with(|messages| {
-                if messages.len() > 0 {
-                    Some(util::replace(messages, ~[]))
-                } else {
-                    None
-                }
-            })
-        };
-        let messages = match messages {
-            Some(m) => m, None => return
-        };
-        for message in messages.iter() {
-            self.message(*message);
-        }
-    }
-
-    fn message(&mut self, message: Message) {
-        match message {
-            RunRemote(i) => {
-                match self.remotes.mut_iter().find(|& &(id, _)| id == i) {
-                    Some(&(_, ref mut f)) => f.call(),
-                    None => unreachable!()
-                }
-            }
-            RemoveRemote(i) => {
-                match self.remotes.iter().position(|&(id, _)| id == i) {
-                    Some(i) => { self.remotes.remove(i); }
-                    None => unreachable!()
-                }
-            }
-        }
-    }
-
-    /// Run the idle callback if one is registered
-    fn idle(&mut self) {
-        unsafe {
-            match self.idle {
-                Some(idle) => {
-                    if (*idle).active {
-                        (*idle).work.call();
-                    }
-                }
-                None => {}
-            }
-        }
-    }
-
-    fn has_idle(&self) -> bool {
-        unsafe { self.idle.is_some() && (**self.idle.get_ref()).active }
-    }
-}
-
-impl EventLoop for BasicLoop {
-    fn run(&mut self) {
-        // Not exactly efficient, but it gets the job done.
-        while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() {
-
-            self.work();
-            self.remote_work();
-
-            if self.has_idle() {
-                self.idle();
-                continue
-            }
-
-            unsafe {
-                // We block here if we have no messages to process and we may
-                // receive a message at a later date
-                self.messages.hold_and_wait(|messages| {
-                    self.remotes.len() > 0 &&
-                        messages.len() == 0 &&
-                        self.work.len() == 0
-                })
-            }
-        }
-    }
-
-    fn callback(&mut self, f: proc()) {
-        self.work.push(f);
-    }
-
-    // XXX: Seems like a really weird requirement to have an event loop provide.
-    fn pausable_idle_callback(&mut self, cb: ~Callback) -> ~PausableIdleCallback {
-        let callback = ~BasicPausable::new(self, cb);
-        rtassert!(self.idle.is_none());
-        unsafe {
-            let cb_ptr: &*mut BasicPausable = cast::transmute(&callback);
-            self.idle = Some(*cb_ptr);
-        }
-        return callback as ~PausableIdleCallback;
-    }
-
-    fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback {
-        let id = self.next_remote;
-        self.next_remote += 1;
-        self.remotes.push((id, f));
-        ~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback
-    }
-
-    fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> {
-        let factory: &mut IoFactory = self.io;
-        Some(factory)
-    }
-}
-
-struct BasicRemote {
-    queue: Exclusive<~[Message]>,
-    id: uint,
-}
-
-impl BasicRemote {
-    fn new(queue: Exclusive<~[Message]>, id: uint) -> BasicRemote {
-        BasicRemote { queue: queue, id: id }
-    }
-}
-
-impl RemoteCallback for BasicRemote {
-    fn fire(&mut self) {
-        unsafe {
-            self.queue.hold_and_signal(|queue| {
-                queue.push(RunRemote(self.id));
-            })
-        }
-    }
-}
-
-impl Drop for BasicRemote {
-    fn drop(&mut self) {
-        unsafe {
-            self.queue.hold_and_signal(|queue| {
-                queue.push(RemoveRemote(self.id));
-            })
-        }
-    }
-}
-
-struct BasicPausable {
-    eloop: *mut BasicLoop,
-    work: ~Callback,
-    active: bool,
-}
-
-impl BasicPausable {
-    fn new(eloop: &mut BasicLoop, cb: ~Callback) -> BasicPausable {
-        BasicPausable {
-            active: false,
-            work: cb,
-            eloop: eloop,
-        }
-    }
-}
-
-impl PausableIdleCallback for BasicPausable {
-    fn pause(&mut self) {
-        self.active = false;
-    }
-    fn resume(&mut self) {
-        self.active = true;
-    }
-}
-
-impl Drop for BasicPausable {
-    fn drop(&mut self) {
-        unsafe {
-            (*self.eloop).idle = None;
-        }
-    }
-}
diff --git a/src/libstd/rt/borrowck.rs b/src/libstd/rt/borrowck.rs
index 423981d9e91..d1e97cb6ec0 100644
--- a/src/libstd/rt/borrowck.rs
+++ b/src/libstd/rt/borrowck.rs
@@ -12,9 +12,8 @@ use c_str::{ToCStr, CString};
 use libc::{c_char, size_t};
 use option::{Option, None, Some};
 use ptr::RawPtr;
-use rt::env;
+use rt;
 use rt::local::Local;
-use rt::task;
 use rt::task::Task;
 use str::OwnedStr;
 use str;
@@ -62,7 +61,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t)
     match try_take_task_borrow_list() {
         None => { // not recording borrows
             let msg = "borrowed";
-            msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line))
+            msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line))
         }
         Some(borrow_list) => { // recording borrows
             let mut msg = ~"borrowed";
@@ -76,7 +75,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t)
                     sep = " and at ";
                 }
             }
-            msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line))
+            msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line))
         }
     }
 }
@@ -95,7 +94,7 @@ unsafe fn debug_borrow<T,P:RawPtr<T>>(tag: &'static str,
     //! A useful debugging function that prints a pointer + tag + newline
     //! without allocating memory.
 
-    if ENABLE_DEBUG && env::debug_borrow() {
+    if ENABLE_DEBUG && rt::env::debug_borrow() {
         debug_borrow_slow(tag, p, old_bits, new_bits, filename, line);
     }
 
@@ -180,7 +179,7 @@ pub unsafe fn unrecord_borrow(a: *u8,
             if br.alloc != a || br.file != file || br.line != line {
                 let err = format!("wrong borrow found, br={:?}", br);
                 err.with_c_str(|msg_p| {
-                    task::begin_unwind_raw(msg_p, file, line)
+                    rt::begin_unwind_raw(msg_p, file, line)
                 })
             }
             borrow_list
diff --git a/src/libstd/rt/context.rs b/src/libstd/rt/context.rs
deleted file mode 100644
index 31cf0696881..00000000000
--- a/src/libstd/rt/context.rs
+++ /dev/null
@@ -1,463 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use option::*;
-use super::stack::StackSegment;
-use libc::c_void;
-use uint;
-use cast::{transmute, transmute_mut_unsafe,
-           transmute_region, transmute_mut_region};
-
-pub static RED_ZONE: uint = 20 * 1024;
-
-// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing
-// SSE regs.  It would be marginally better not to do this. In C++ we
-// use an attribute on a struct.
-// FIXME #7761: It would be nice to define regs as `~Option<Registers>` since
-// the registers are sometimes empty, but the discriminant would
-// then misalign the regs again.
-pub struct Context {
-    /// The context entry point, saved here for later destruction
-    priv start: Option<~proc()>,
-    /// Hold the registers while the task or scheduler is suspended
-    priv regs: ~Registers,
-    /// Lower bound and upper bound for the stack
-    priv stack_bounds: Option<(uint, uint)>,
-}
-
-impl Context {
-    pub fn empty() -> Context {
-        Context {
-            start: None,
-            regs: new_regs(),
-            stack_bounds: None,
-        }
-    }
-
-    /// Create a new context that will resume execution by running proc()
-    pub fn new(start: proc(), stack: &mut StackSegment) -> Context {
-        // FIXME #7767: Putting main into a ~ so it's a thin pointer and can
-        // be passed to the spawn function.  Another unfortunate
-        // allocation
-        let start = ~start;
-
-        // The C-ABI function that is the task entry point
-        extern fn task_start_wrapper(f: &proc()) {
-            // XXX(pcwalton): This may be sketchy.
-            unsafe {
-                let f: &|| = transmute(f);
-                (*f)()
-            }
-        }
-
-        let fp: *c_void = task_start_wrapper as *c_void;
-        let argp: *c_void = unsafe { transmute::<&proc(), *c_void>(&*start) };
-        let sp: *uint = stack.end();
-        let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) };
-        // Save and then immediately load the current context,
-        // which we will then modify to call the given function when restored
-        let mut regs = new_regs();
-        unsafe {
-            rust_swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs));
-        };
-
-        initialize_call_frame(&mut *regs, fp, argp, sp);
-
-        // Scheduler tasks don't have a stack in the "we allocated it" sense,
-        // but rather they run on pthreads stacks. We have complete control over
-        // them in terms of the code running on them (and hopefully they don't
-        // overflow). Additionally, their coroutine stacks are listed as being
-        // zero-length, so that's how we detect what's what here.
-        let stack_base: *uint = stack.start();
-        let bounds = if sp as uint == stack_base as uint {
-            None
-        } else {
-            Some((stack_base as uint, sp as uint))
-        };
-        return Context {
-            start: Some(start),
-            regs: regs,
-            stack_bounds: bounds,
-        }
-    }
-
-    /* Switch contexts
-
-    Suspend the current execution context and resume another by
-    saving the registers values of the executing thread to a Context
-    then loading the registers from a previously saved Context.
-    */
-    pub fn swap(out_context: &mut Context, in_context: &Context) {
-        rtdebug!("swapping contexts");
-        let out_regs: &mut Registers = match out_context {
-            &Context { regs: ~ref mut r, .. } => r
-        };
-        let in_regs: &Registers = match in_context {
-            &Context { regs: ~ref r, .. } => r
-        };
-
-        rtdebug!("noting the stack limit and doing raw swap");
-
-        unsafe {
-            // Right before we switch to the new context, set the new context's
-            // stack limit in the OS-specified TLS slot. This also  means that
-            // we cannot call any more rust functions after record_stack_bounds
-            // returns because they would all likely fail due to the limit being
-            // invalid for the current task. Lucky for us `rust_swap_registers`
-            // is a C function so we don't have to worry about that!
-            match in_context.stack_bounds {
-                Some((lo, hi)) => record_stack_bounds(lo, hi),
-                // If we're going back to one of the original contexts or
-                // something that's possibly not a "normal task", then reset
-                // the stack limit to 0 to make morestack never fail
-                None => record_stack_bounds(0, uint::max_value),
-            }
-            rust_swap_registers(out_regs, in_regs)
-        }
-    }
-}
-
-extern {
-    fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers);
-}
-
-// Register contexts used in various architectures
-//
-// These structures all represent a context of one task throughout its
-// execution. Each struct is a representation of the architecture's register
-// set. When swapping between tasks, these register sets are used to save off
-// the current registers into one struct, and load them all from another.
-//
-// Note that this is only used for context switching, which means that some of
-// the registers may go unused. For example, for architectures with
-// callee/caller saved registers, the context will only reflect the callee-saved
-// registers. This is because the caller saved registers are already stored
-// elsewhere on the stack (if it was necessary anyway).
-//
-// Additionally, there may be fields on various architectures which are unused
-// entirely because they only reflect what is theoretically possible for a
-// "complete register set" to show, but user-space cannot alter these registers.
-// An example of this would be the segment selectors for x86.
-//
-// These structures/functions are roughly in-sync with the source files inside
-// of src/rt/arch/$arch. The only currently used function from those folders is
-// the `rust_swap_registers` function, but that's only because for now segmented
-// stacks are disabled.
-
-#[cfg(target_arch = "x86")]
-struct Registers {
-    eax: u32, ebx: u32, ecx: u32, edx: u32,
-    ebp: u32, esi: u32, edi: u32, esp: u32,
-    cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16,
-    eflags: u32, eip: u32
-}
-
-#[cfg(target_arch = "x86")]
-fn new_regs() -> ~Registers {
-    ~Registers {
-        eax: 0, ebx: 0, ecx: 0, edx: 0,
-        ebp: 0, esi: 0, edi: 0, esp: 0,
-        cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0,
-        eflags: 0, eip: 0
-    }
-}
-
-#[cfg(target_arch = "x86")]
-fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
-                         sp: *mut uint) {
-
-    let sp = align_down(sp);
-    let sp = mut_offset(sp, -4);
-
-    unsafe { *sp = arg as uint };
-    let sp = mut_offset(sp, -1);
-    unsafe { *sp = 0 }; // The final return address
-
-    regs.esp = sp as u32;
-    regs.eip = fptr as u32;
-
-    // Last base pointer on the stack is 0
-    regs.ebp = 0;
-}
-
-// windows requires saving more registers (both general and XMM), so the windows
-// register context must be larger.
-#[cfg(windows, target_arch = "x86_64")]
-type Registers = [uint, ..34];
-#[cfg(not(windows), target_arch = "x86_64")]
-type Registers = [uint, ..22];
-
-#[cfg(windows, target_arch = "x86_64")]
-fn new_regs() -> ~Registers { ~([0, .. 34]) }
-#[cfg(not(windows), target_arch = "x86_64")]
-fn new_regs() -> ~Registers { ~([0, .. 22]) }
-
-#[cfg(target_arch = "x86_64")]
-fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
-                         sp: *mut uint) {
-
-    // Redefinitions from rt/arch/x86_64/regs.h
-    static RUSTRT_ARG0: uint = 3;
-    static RUSTRT_RSP: uint = 1;
-    static RUSTRT_IP: uint = 8;
-    static RUSTRT_RBP: uint = 2;
-
-    let sp = align_down(sp);
-    let sp = mut_offset(sp, -1);
-
-    // The final return address. 0 indicates the bottom of the stack
-    unsafe { *sp = 0; }
-
-    rtdebug!("creating call frame");
-    rtdebug!("fptr {}", fptr);
-    rtdebug!("arg {}", arg);
-    rtdebug!("sp {}", sp);
-
-    regs[RUSTRT_ARG0] = arg as uint;
-    regs[RUSTRT_RSP] = sp as uint;
-    regs[RUSTRT_IP] = fptr as uint;
-
-    // Last base pointer on the stack should be 0
-    regs[RUSTRT_RBP] = 0;
-}
-
-#[cfg(target_arch = "arm")]
-type Registers = [uint, ..32];
-
-#[cfg(target_arch = "arm")]
-fn new_regs() -> ~Registers { ~([0, .. 32]) }
-
-#[cfg(target_arch = "arm")]
-fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
-                         sp: *mut uint) {
-    let sp = align_down(sp);
-    // sp of arm eabi is 8-byte aligned
-    let sp = mut_offset(sp, -2);
-
-    // The final return address. 0 indicates the bottom of the stack
-    unsafe { *sp = 0; }
-
-    regs[0] = arg as uint;   // r0
-    regs[13] = sp as uint;   // #53 sp, r13
-    regs[14] = fptr as uint; // #60 pc, r15 --> lr
-}
-
-#[cfg(target_arch = "mips")]
-type Registers = [uint, ..32];
-
-#[cfg(target_arch = "mips")]
-fn new_regs() -> ~Registers { ~([0, .. 32]) }
-
-#[cfg(target_arch = "mips")]
-fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void,
-                         sp: *mut uint) {
-    let sp = align_down(sp);
-    // sp of mips o32 is 8-byte aligned
-    let sp = mut_offset(sp, -2);
-
-    // The final return address. 0 indicates the bottom of the stack
-    unsafe { *sp = 0; }
-
-    regs[4] = arg as uint;
-    regs[29] = sp as uint;
-    regs[25] = fptr as uint;
-    regs[31] = fptr as uint;
-}
-
-fn align_down(sp: *mut uint) -> *mut uint {
-    unsafe {
-        let sp: uint = transmute(sp);
-        let sp = sp & !(16 - 1);
-        transmute::<uint, *mut uint>(sp)
-    }
-}
-
-// ptr::mut_offset is positive ints only
-#[inline]
-pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T {
-    use mem::size_of;
-    (ptr as int + count * (size_of::<T>() as int)) as *mut T
-}
-
-#[inline(always)]
-pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) {
-    // When the old runtime had segmented stacks, it used a calculation that was
-    // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
-    // symbol resolution, llvm function calls, etc. In theory this red zone
-    // value is 0, but it matters far less when we have gigantic stacks because
-    // we don't need to be so exact about our stack budget. The "fudge factor"
-    // was because LLVM doesn't emit a stack check for functions < 256 bytes in
-    // size. Again though, we have giant stacks, so we round all these
-    // calculations up to the nice round number of 20k.
-    record_sp_limit(stack_lo + RED_ZONE);
-
-    return target_record_stack_bounds(stack_lo, stack_hi);
-
-    #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)]
-    unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
-    #[cfg(windows, target_arch = "x86_64")] #[inline(always)]
-    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
-        // Windows compiles C functions which may check the stack bounds. This
-        // means that if we want to perform valid FFI on windows, then we need
-        // to ensure that the stack bounds are what they truly are for this
-        // task. More info can be found at:
-        //   https://github.com/mozilla/rust/issues/3445#issuecomment-26114839
-        //
-        // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
-        asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
-        asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
-    }
-}
-
-/// Records the current limit of the stack as specified by `end`.
-///
-/// This is stored in an OS-dependent location, likely inside of the thread
-/// local storage. The location that the limit is stored is a pre-ordained
-/// location because it's where LLVM has emitted code to check.
-///
-/// Note that this cannot be called under normal circumstances. This function is
-/// changing the stack limit, so upon returning any further function calls will
-/// possibly be triggering the morestack logic if you're not careful.
-///
-/// Also note that this and all of the inside functions are all flagged as
-/// "inline(always)" because they're messing around with the stack limits.  This
-/// would be unfortunate for the functions themselves to trigger a morestack
-/// invocation (if they were an actual function call).
-#[inline(always)]
-pub unsafe fn record_sp_limit(limit: uint) {
-    return target_record_sp_limit(limit);
-
-    // x86-64
-    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movq $$0x60+90*8, %rsi
-              movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
-    }
-    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
-    }
-    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
-        // store this inside of the "arbitrary data slot", but double the size
-        // because this is 64 bit instead of 32 bit
-        asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile")
-    }
-    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
-    }
-
-    // x86
-    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movl $$0x48+90*4, %eax
-              movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
-    }
-    #[cfg(target_arch = "x86", target_os = "linux")]
-    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
-    }
-    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
-        // store this inside of the "arbitrary data slot"
-        asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile")
-    }
-
-    // mips, arm - Some brave soul can port these to inline asm, but it's over
-    //             my head personally
-    #[cfg(target_arch = "mips")]
-    #[cfg(target_arch = "arm")] #[inline(always)]
-    unsafe fn target_record_sp_limit(limit: uint) {
-        return record_sp_limit(limit as *c_void);
-        extern {
-            fn record_sp_limit(limit: *c_void);
-        }
-    }
-}
-
-/// The counterpart of the function above, this function will fetch the current
-/// stack limit stored in TLS.
-///
-/// Note that all of these functions are meant to be exact counterparts of their
-/// brethren above, except that the operands are reversed.
-///
-/// As with the setter, this function does not have a __morestack header and can
-/// therefore be called in a "we're out of stack" situation.
-#[inline(always)]
-// currently only called by `rust_stack_exhausted`, which doesn't
-// exist in a test build.
-#[cfg(not(test))]
-pub unsafe fn get_sp_limit() -> uint {
-    return target_get_sp_limit();
-
-    // x86-64
-    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq $$0x60+90*8, %rsi
-              movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-
-    // x86
-    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movl $$0x48+90*4, %eax
-              movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86", target_os = "linux")]
-    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        let limit;
-        asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile");
-        return limit;
-    }
-
-    // mips, arm - Some brave soul can port these to inline asm, but it's over
-    //             my head personally
-    #[cfg(target_arch = "mips")]
-    #[cfg(target_arch = "arm")] #[inline(always)]
-    unsafe fn target_get_sp_limit() -> uint {
-        return get_sp_limit() as uint;
-        extern {
-            fn get_sp_limit() -> *c_void;
-        }
-    }
-}
diff --git a/src/libstd/rt/crate_map.rs b/src/libstd/rt/crate_map.rs
index 22fc3f0ab56..d9b40cfbb6e 100644
--- a/src/libstd/rt/crate_map.rs
+++ b/src/libstd/rt/crate_map.rs
@@ -30,7 +30,7 @@ pub struct CrateMap<'a> {
     version: i32,
     entries: &'a [ModEntry<'a>],
     children: &'a [&'a CrateMap<'a>],
-    event_loop_factory: Option<extern "C" fn() -> ~EventLoop>,
+    event_loop_factory: Option<fn() -> ~EventLoop>,
 }
 
 #[cfg(not(windows))]
diff --git a/src/libstd/rt/env.rs b/src/libstd/rt/env.rs
index d1bd450afe2..f3fa482b18c 100644
--- a/src/libstd/rt/env.rs
+++ b/src/libstd/rt/env.rs
@@ -17,7 +17,7 @@ use os;
 // Note that these are all accessed without any synchronization.
 // They are expected to be initialized once then left alone.
 
-static mut MIN_STACK: uint = 2000000;
+static mut MIN_STACK: uint = 2 * 1024 * 1024;
 static mut DEBUG_BORROW: bool = false;
 static mut POISON_ON_FREE: bool = false;
 
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
deleted file mode 100644
index f4f128cf5aa..00000000000
--- a/src/libstd/rt/kill.rs
+++ /dev/null
@@ -1,318 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*!
-
-Task death: asynchronous killing, linked failure, exit code propagation.
-
-This file implements two orthogonal building-blocks for communicating failure
-between tasks. One is 'linked failure' or 'task killing', that is, a failing
-task causing other tasks to fail promptly (even those that are blocked on
-pipes or I/O). The other is 'exit code propagation', which affects the result
-observed by the parent of a task::try task that itself spawns child tasks
-(such as any #[test] function). In both cases the data structures live in
-KillHandle.
-
-
-I. Task killing.
-
-The model for killing involves two atomic flags, the "kill flag" and the
-"unkillable flag". Operations on the kill flag include:
-
-- In the taskgroup code (task/spawn.rs), tasks store a clone of their
-  KillHandle in their shared taskgroup. Another task in the group that fails
-  will use that handle to call kill().
-- When a task blocks, it turns its ~Task into a BlockedTask by storing a
-  the transmuted ~Task pointer inside the KillHandle's kill flag. A task
-  trying to block and a task trying to kill it can simultaneously access the
-  kill flag, after which the task will get scheduled and fail (no matter who
-  wins the race). Likewise, a task trying to wake a blocked task normally and
-  a task trying to kill it can simultaneously access the flag; only one will
-  get the task to reschedule it.
-
-Operations on the unkillable flag include:
-
-- When a task becomes unkillable, it swaps on the flag to forbid any killer
-  from waking it up while it's blocked inside the unkillable section. If a
-  kill was already pending, the task fails instead of becoming unkillable.
-- When a task is done being unkillable, it restores the flag to the normal
-  running state. If a kill was received-but-blocked during the unkillable
-  section, the task fails at this later point.
-- When a task tries to kill another task, before swapping on the kill flag, it
-  first swaps on the unkillable flag, to see if it's "allowed" to wake up the
-  task. If it isn't, the killed task will receive the signal when it becomes
-  killable again. (Of course, a task trying to wake the task normally (e.g.
-  sending on a channel) does not access the unkillable flag at all.)
-
-Why do we not need acquire/release barriers on any of the kill flag swaps?
-This is because barriers establish orderings between accesses on different
-memory locations, but each kill-related operation is only a swap on a single
-location, so atomicity is all that matters. The exception is kill(), which
-does a swap on both flags in sequence. kill() needs no barriers because it
-does not matter if its two accesses are seen reordered on another CPU: if a
-killer does perform both writes, it means it saw a KILL_RUNNING in the
-unkillable flag, which means an unkillable task will see KILL_KILLED and fail
-immediately (rendering the subsequent write to the kill flag unnecessary).
-
-
-II. Exit code propagation.
-
-The basic model for exit code propagation, which is used with the "watched"
-spawn mode (on by default for linked spawns, off for supervised and unlinked
-spawns), is that a parent will wait for all its watched children to exit
-before reporting whether it succeeded or failed. A watching parent will only
-report success if it succeeded and all its children also reported success;
-otherwise, it will report failure. This is most useful for writing test cases:
-
- ```
-#[test]
-fn test_something_in_another_task {
-    do spawn {
-        assert!(collatz_conjecture_is_false());
-    }
-}
- ```
-
-Here, as the child task will certainly outlive the parent task, we might miss
-the failure of the child when deciding whether or not the test case passed.
-The watched spawn mode avoids this problem.
-
-In order to propagate exit codes from children to their parents, any
-'watching' parent must wait for all of its children to exit before it can
-report its final exit status. We achieve this by using an UnsafeArc, using the
-reference counting to track how many children are still alive, and using the
-unwrap() operation in the parent's exit path to wait for all children to exit.
-The UnsafeArc referred to here is actually the KillHandle itself.
-
-This also works transitively, as if a "middle" watched child task is itself
-watching a grandchild task, the "middle" task will do unwrap() on its own
-KillHandle (thereby waiting for the grandchild to exit) before dropping its
-reference to its watching parent (which will alert the parent).
-
-While UnsafeArc::unwrap() accomplishes the synchronization, there remains the
-matter of reporting the exit codes themselves. This is easiest when an exiting
-watched task has no watched children of its own:
-
-- If the task with no watched children exits successfully, it need do nothing.
-- If the task with no watched children has failed, it sets a flag in the
-  parent's KillHandle ("any_child_failed") to false. It then stays false forever.
-
-However, if a "middle" watched task with watched children of its own exits
-before its child exits, we need to ensure that the grandparent task may still
-see a failure from the grandchild task. While we could achieve this by having
-each intermediate task block on its handle, this keeps around the other resources
-the task was using. To be more efficient, this is accomplished via "tombstones".
-
-A tombstone is a closure, proc() -> bool, which will perform any waiting necessary
-to collect the exit code of descendant tasks. In its environment is captured
-the KillHandle of whichever task created the tombstone, and perhaps also any
-tombstones that that task itself had, and finally also another tombstone,
-effectively creating a lazy-list of heap closures.
-
-When a child wishes to exit early and leave tombstones behind for its parent,
-it must use a LittleLock (pthread mutex) to synchronize with any possible
-sibling tasks which are trying to do the same thing with the same parent.
-However, on the other side, when the parent is ready to pull on the tombstones,
-it need not use this lock, because the unwrap() serves as a barrier that ensures
-no children will remain with references to the handle.
-
-The main logic for creating and assigning tombstones can be found in the
-function reparent_children_to() in the impl for KillHandle.
-
-
-IIA. Issues with exit code propagation.
-
-There are two known issues with the current scheme for exit code propagation.
-
-- As documented in issue #8136, the structure mandates the possibility for stack
-  overflow when collecting tombstones that are very deeply nested. This cannot
-  be avoided with the closure representation, as tombstones end up structured in
-  a sort of tree. However, notably, the tombstones do not actually need to be
-  collected in any particular order, and so a doubly-linked list may be used.
-  However we do not do this yet because DList is in libextra.
-
-- A discussion with Graydon made me realize that if we decoupled the exit code
-  propagation from the parents-waiting action, this could result in a simpler
-  implementation as the exit codes themselves would not have to be propagated,
-  and could instead be propagated implicitly through the taskgroup mechanism
-  that we already have. The tombstoning scheme would still be required. I have
-  not implemented this because currently we can't receive a linked failure kill
-  signal during the task cleanup activity, as that is currently "unkillable",
-  and occurs outside the task's unwinder's "try" block, so would require some
-  restructuring.
-
-*/
-
-use cast;
-use option::{Option, Some, None};
-use prelude::*;
-use iter;
-use task::TaskResult;
-use rt::task::Task;
-use unstable::atomics::{AtomicUint, SeqCst};
-use unstable::sync::UnsafeArc;
-
-/// A handle to a blocked task. Usually this means having the ~Task pointer by
-/// ownership, but if the task is killable, a killer can steal it at any time.
-pub enum BlockedTask {
-    Owned(~Task),
-    Shared(UnsafeArc<AtomicUint>),
-}
-
-/// Per-task state related to task death, killing, failure, etc.
-pub struct Death {
-    // Action to be done with the exit code. If set, also makes the task wait
-    // until all its watched children exit before collecting the status.
-    on_exit:         Option<proc(TaskResult)>,
-    // nesting level counter for unstable::atomically calls (0 == can deschedule).
-    priv wont_sleep:      int,
-}
-
-pub struct BlockedTaskIterator {
-    priv inner: UnsafeArc<AtomicUint>,
-}
-
-impl Iterator<BlockedTask> for BlockedTaskIterator {
-    fn next(&mut self) -> Option<BlockedTask> {
-        Some(Shared(self.inner.clone()))
-    }
-}
-
-impl BlockedTask {
-    /// Returns Some if the task was successfully woken; None if already killed.
-    pub fn wake(self) -> Option<~Task> {
-        match self {
-            Owned(task) => Some(task),
-            Shared(arc) => unsafe {
-                match (*arc.get()).swap(0, SeqCst) {
-                    0 => None,
-                    n => cast::transmute(n),
-                }
-            }
-        }
-    }
-
-    /// Create a blocked task, unless the task was already killed.
-    pub fn block(task: ~Task) -> BlockedTask {
-        Owned(task)
-    }
-
-    /// Converts one blocked task handle to a list of many handles to the same.
-    pub fn make_selectable(self, num_handles: uint)
-        -> iter::Take<BlockedTaskIterator>
-    {
-        let arc = match self {
-            Owned(task) => {
-                let flag = unsafe { AtomicUint::new(cast::transmute(task)) };
-                UnsafeArc::new(flag)
-            }
-            Shared(arc) => arc.clone(),
-        };
-        BlockedTaskIterator{ inner: arc }.take(num_handles)
-    }
-
-    // This assertion has two flavours because the wake involves an atomic op.
-    // In the faster version, destructors will fail dramatically instead.
-    #[inline] #[cfg(not(test))]
-    pub fn assert_already_awake(self) { }
-    #[inline] #[cfg(test)]
-    pub fn assert_already_awake(self) { assert!(self.wake().is_none()); }
-
-    /// Convert to an unsafe uint value. Useful for storing in a pipe's state flag.
-    #[inline]
-    pub unsafe fn cast_to_uint(self) -> uint {
-        match self {
-            Owned(task) => {
-                let blocked_task_ptr: uint = cast::transmute(task);
-                rtassert!(blocked_task_ptr & 0x1 == 0);
-                blocked_task_ptr
-            }
-            Shared(arc) => {
-                let blocked_task_ptr: uint = cast::transmute(~arc);
-                rtassert!(blocked_task_ptr & 0x1 == 0);
-                blocked_task_ptr | 0x1
-            }
-        }
-    }
-
-    /// Convert from an unsafe uint value. Useful for retrieving a pipe's state flag.
-    #[inline]
-    pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
-        if blocked_task_ptr & 0x1 == 0 {
-            Owned(cast::transmute(blocked_task_ptr))
-        } else {
-            let ptr: ~UnsafeArc<AtomicUint> = cast::transmute(blocked_task_ptr & !1);
-            Shared(*ptr)
-        }
-    }
-}
-
-impl Death {
-    pub fn new() -> Death {
-        Death {
-            on_exit:         None,
-            wont_sleep:      0,
-        }
-    }
-
-    /// Collect failure exit codes from children and propagate them to a parent.
-    pub fn collect_failure(&mut self, result: TaskResult) {
-        match self.on_exit.take() {
-            Some(f) => f(result),
-            None => {}
-        }
-    }
-
-    /// Enter a possibly-nested "atomic" section of code. Just for assertions.
-    /// All calls must be paired with a subsequent call to allow_deschedule.
-    #[inline]
-    pub fn inhibit_deschedule(&mut self) {
-        self.wont_sleep += 1;
-    }
-
-    /// Exit a possibly-nested "atomic" section of code. Just for assertions.
-    /// All calls must be paired with a preceding call to inhibit_deschedule.
-    #[inline]
-    pub fn allow_deschedule(&mut self) {
-        rtassert!(self.wont_sleep != 0);
-        self.wont_sleep -= 1;
-    }
-
-    /// Ensure that the task is allowed to become descheduled.
-    #[inline]
-    pub fn assert_may_sleep(&self) {
-        if self.wont_sleep != 0 {
-            rtabort!("illegal atomic-sleep: attempt to reschedule while \
-                      using an Exclusive or LittleLock");
-        }
-    }
-}
-
-impl Drop for Death {
-    fn drop(&mut self) {
-        // Mustn't be in an atomic or unkillable section at task death.
-        rtassert!(self.wont_sleep == 0);
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use rt::test::*;
-    use super::*;
-
-    // Task blocking tests
-
-    #[test]
-    fn block_and_wake() {
-        do with_test_task |task| {
-            BlockedTask::block(task).wake().unwrap()
-        }
-    }
-}
diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs
index d73ad98a25b..1c04b6b43ce 100644
--- a/src/libstd/rt/local.rs
+++ b/src/libstd/rt/local.rs
@@ -8,8 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use option::{Option, Some, None};
-use rt::sched::Scheduler;
+use option::Option;
 use rt::task::Task;
 use rt::local_ptr;
 
@@ -46,87 +45,10 @@ impl Local<local_ptr::Borrowed<Task>> for Task {
     }
 }
 
-/// Encapsulates a temporarily-borrowed scheduler.
-pub struct BorrowedScheduler {
-    priv task: local_ptr::Borrowed<Task>,
-}
-
-impl BorrowedScheduler {
-    fn new(mut task: local_ptr::Borrowed<Task>) -> BorrowedScheduler {
-        if task.get().sched.is_none() {
-            rtabort!("no scheduler")
-        } else {
-            BorrowedScheduler {
-                task: task,
-            }
-        }
-    }
-
-    #[inline]
-    pub fn get<'a>(&'a mut self) -> &'a mut ~Scheduler {
-        match self.task.get().sched {
-            None => rtabort!("no scheduler"),
-            Some(ref mut sched) => sched,
-        }
-    }
-}
-
-impl Local<BorrowedScheduler> for Scheduler {
-    fn put(value: ~Scheduler) {
-        let mut task = Local::borrow(None::<Task>);
-        task.get().sched = Some(value);
-    }
-    #[inline]
-    fn take() -> ~Scheduler {
-        unsafe {
-            // XXX: Unsafe for speed
-            let task: *mut Task = Local::unsafe_borrow();
-            (*task).sched.take_unwrap()
-        }
-    }
-    fn exists(_: Option<Scheduler>) -> bool {
-        let mut task = Local::borrow(None::<Task>);
-        task.get().sched.is_some()
-    }
-    #[inline]
-    fn borrow(_: Option<Scheduler>) -> BorrowedScheduler {
-        BorrowedScheduler::new(Local::borrow(None::<Task>))
-    }
-    unsafe fn unsafe_take() -> ~Scheduler { rtabort!("unimpl") }
-    unsafe fn unsafe_borrow() -> *mut Scheduler {
-        let task: *mut Task = Local::unsafe_borrow();
-        match (*task).sched {
-            Some(~ref mut sched) => {
-                let s: *mut Scheduler = &mut *sched;
-                return s;
-            }
-            None => {
-                rtabort!("no scheduler")
-            }
-        }
-    }
-    unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> {
-        let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
-        match task_opt {
-            Some(task) => {
-                match (*task).sched {
-                    Some(~ref mut sched) => {
-                        let s: *mut Scheduler = &mut *sched;
-                        Some(s)
-                    }
-                    None => None
-                }
-            }
-            None => None
-        }
-    }
-}
-
 #[cfg(test)]
 mod test {
     use option::None;
     use unstable::run_in_bare_thread;
-    use rt::test::*;
     use super::*;
     use rt::task::Task;
     use rt::local_ptr;
@@ -135,8 +57,7 @@ mod test {
     fn thread_local_task_smoke_test() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
             let task: ~Task = Local::take();
             cleanup_task(task);
@@ -147,12 +68,11 @@ mod test {
     fn thread_local_task_two_instances() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
             let task: ~Task = Local::take();
             cleanup_task(task);
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
             let task: ~Task = Local::take();
             cleanup_task(task);
@@ -164,8 +84,7 @@ mod test {
     fn borrow_smoke_test() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
 
             unsafe {
@@ -180,8 +99,7 @@ mod test {
     fn borrow_with_return() {
         do run_in_bare_thread {
             local_ptr::init();
-            let mut sched = ~new_test_uv_sched();
-            let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){});
+            let task = ~Task::new();
             Local::put(task);
 
             {
@@ -193,5 +111,9 @@ mod test {
         }
     }
 
+    fn cleanup_task(mut t: ~Task) {
+        t.destroyed = true;
+    }
+
 }
 
diff --git a/src/libstd/rt/local_ptr.rs b/src/libstd/rt/local_ptr.rs
index 925aa802ad5..42cce272e44 100644
--- a/src/libstd/rt/local_ptr.rs
+++ b/src/libstd/rt/local_ptr.rs
@@ -42,7 +42,7 @@ impl<T> Drop for Borrowed<T> {
             }
             let val: ~T = cast::transmute(self.val);
             put::<T>(val);
-            assert!(exists());
+            rtassert!(exists());
         }
     }
 }
@@ -109,7 +109,9 @@ pub mod compiled {
     /// Does not validate the pointer type.
     #[inline]
     pub unsafe fn take<T>() -> ~T {
-        let ptr: ~T = cast::transmute(RT_TLS_PTR);
+        let ptr = RT_TLS_PTR;
+        rtassert!(!ptr.is_null());
+        let ptr: ~T = cast::transmute(ptr);
         // can't use `as`, due to type not matching with `cfg(test)`
         RT_TLS_PTR = cast::transmute(0);
         ptr
@@ -178,7 +180,7 @@ pub mod native {
     }
 
     pub unsafe fn cleanup() {
-        assert!(INITIALIZED);
+        rtassert!(INITIALIZED);
         tls::destroy(RT_TLS_KEY);
         LOCK.destroy();
         INITIALIZED = false;
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index df1ebeb6407..0dd6c883d5b 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -57,27 +57,17 @@ Several modules in `core` are clients of `rt`:
 // XXX: this should not be here.
 #[allow(missing_doc)];
 
+use any::Any;
 use clone::Clone;
 use container::Container;
 use iter::Iterator;
-use option::{Option, None, Some};
+use option::Option;
 use ptr::RawPtr;
-use rt::local::Local;
-use rt::sched::{Scheduler, Shutdown};
-use rt::sleeper_list::SleeperList;
-use task::TaskResult;
-use rt::task::{Task, SchedTask, GreenTask, Sched};
-use send_str::SendStrStatic;
-use unstable::atomics::{AtomicInt, AtomicBool, SeqCst};
-use unstable::sync::UnsafeArc;
+use result::Result;
+use task::TaskOpts;
 use vec::{OwnedVector, MutableVector, ImmutableVector};
-use vec;
 
-use self::thread::Thread;
-
-// the os module needs to reach into this helper, so allow general access
-// through this reexport.
-pub use self::util::set_exit_status;
+use self::task::{Task, BlockedTask};
 
 // this is somewhat useful when a program wants to spawn a "reasonable" number
 // of workers based on the constraints of the system that it's running on.
@@ -85,8 +75,8 @@ pub use self::util::set_exit_status;
 // method...
 pub use self::util::default_sched_threads;
 
-// Re-export of the functionality in the kill module
-pub use self::kill::BlockedTask;
+// Export unwinding facilities used by the failure macros
+pub use self::unwind::{begin_unwind, begin_unwind_raw};
 
 // XXX: these probably shouldn't be public...
 #[doc(hidden)]
@@ -99,21 +89,12 @@ pub mod shouldnt_be_public {
 // Internal macros used by the runtime.
 mod macros;
 
-/// Basic implementation of an EventLoop, provides no I/O interfaces
-mod basic;
-
 /// The global (exchange) heap.
 pub mod global_heap;
 
 /// Implementations of language-critical runtime features like @.
 pub mod task;
 
-/// Facilities related to task failure, killing, and death.
-mod kill;
-
-/// The coroutine task scheduler, built on the `io` event loop.
-pub mod sched;
-
 /// The EventLoop and internal synchronous I/O interface.
 pub mod rtio;
 
@@ -121,27 +102,6 @@ pub mod rtio;
 /// or task-local storage.
 pub mod local;
 
-/// A mostly lock-free multi-producer, single consumer queue.
-pub mod mpsc_queue;
-
-/// A lock-free single-producer, single consumer queue.
-pub mod spsc_queue;
-
-/// A lock-free multi-producer, multi-consumer bounded queue.
-mod mpmc_bounded_queue;
-
-/// A parallel work-stealing deque
-pub mod deque;
-
-/// A parallel data structure for tracking sleeping schedulers.
-pub mod sleeper_list;
-
-/// Stack segments and caching.
-pub mod stack;
-
-/// CPU context swapping.
-mod context;
-
 /// Bindings to system threading libraries.
 pub mod thread;
 
@@ -157,16 +117,6 @@ pub mod logging;
 /// Crate map
 pub mod crate_map;
 
-/// Tools for testing the runtime
-pub mod test;
-
-/// Reference counting
-pub mod rc;
-
-/// A simple single-threaded channel type for passing buffered data between
-/// scheduler and task context
-pub mod tube;
-
 /// The runtime needs to be able to put a pointer into thread-local storage.
 mod local_ptr;
 
@@ -185,41 +135,33 @@ pub mod args;
 // Support for dynamic borrowck
 pub mod borrowck;
 
-/// Set up a default runtime configuration, given compiler-supplied arguments.
-///
-/// This is invoked by the `start` _language item_ (unstable::lang) to
-/// run a Rust executable.
-///
-/// # Arguments
-///
-/// * `argc` & `argv` - The argument vector. On Unix this information is used
-///   by os::args.
-///
-/// # Return value
-///
-/// The return value is used as the process return code. 0 on success, 101 on error.
-pub fn start(argc: int, argv: **u8, main: proc()) -> int {
-
-    init(argc, argv);
-    let exit_code = run(main);
-    // unsafe is ok b/c we're sure that the runtime is gone
-    unsafe { cleanup(); }
-
-    return exit_code;
-}
+/// The default error code of the rust runtime if the main task fails instead
+/// of exiting cleanly.
+pub static DEFAULT_ERROR_CODE: int = 101;
 
-/// Like `start` but creates an additional scheduler on the current thread,
-/// which in most cases will be the 'main' thread, and pins the main task to it.
+/// The interface to the current runtime.
 ///
-/// This is appropriate for running code that must execute on the main thread,
-/// such as the platform event loop and GUI.
-pub fn start_on_main_thread(argc: int, argv: **u8, main: proc()) -> int {
-    init(argc, argv);
-    let exit_code = run_on_main_thread(main);
-    // unsafe is ok b/c we're sure that the runtime is gone
-    unsafe { cleanup(); }
-
-    return exit_code;
+/// This trait is used as the abstraction between 1:1 and M:N scheduling. The
+/// two independent crates, libnative and libgreen, both have objects which
+/// implement this trait. The goal of this trait is to encompass all the
+/// fundamental differences in functionality between the 1:1 and M:N runtime
+/// modes.
+pub trait Runtime {
+    // Necessary scheduling functions, used for channels and blocking I/O
+    // (sometimes).
+    fn yield_now(~self, cur_task: ~Task);
+    fn maybe_yield(~self, cur_task: ~Task);
+    fn deschedule(~self, times: uint, cur_task: ~Task,
+                  f: |BlockedTask| -> Result<(), BlockedTask>);
+    fn reawaken(~self, to_wake: ~Task, can_resched: bool);
+
+    // Miscellaneous calls which are very different depending on what context
+    // you're in.
+    fn spawn_sibling(~self, cur_task: ~Task, opts: TaskOpts, f: proc());
+    fn local_io<'a>(&'a mut self) -> Option<rtio::LocalIo<'a>>;
+
+    // XXX: This is a serious code smell and this should not exist at all.
+    fn wrap(~self) -> ~Any;
 }
 
 /// One-time runtime initialization.
@@ -234,6 +176,7 @@ pub fn init(argc: int, argv: **u8) {
         args::init(argc, argv);
         env::init();
         logging::init();
+        local_ptr::init();
     }
 }
 
@@ -250,239 +193,3 @@ pub unsafe fn cleanup() {
     args::cleanup();
     local_ptr::cleanup();
 }
-
-/// Execute the main function in a scheduler.
-///
-/// Configures the runtime according to the environment, by default
-/// using a task scheduler with the same number of threads as cores.
-/// Returns a process exit code.
-pub fn run(main: proc()) -> int {
-    run_(main, false)
-}
-
-pub fn run_on_main_thread(main: proc()) -> int {
-    run_(main, true)
-}
-
-fn run_(main: proc(), use_main_sched: bool) -> int {
-    static DEFAULT_ERROR_CODE: int = 101;
-
-    let nscheds = util::default_sched_threads();
-
-    let mut main = Some(main);
-
-    // The shared list of sleeping schedulers.
-    let sleepers = SleeperList::new();
-
-    // Create a work queue for each scheduler, ntimes. Create an extra
-    // for the main thread if that flag is set. We won't steal from it.
-    let mut pool = deque::BufferPool::new();
-    let arr = vec::from_fn(nscheds, |_| pool.deque());
-    let (workers, stealers) = vec::unzip(arr.move_iter());
-
-    // The schedulers.
-    let mut scheds = ~[];
-    // Handles to the schedulers. When the main task ends these will be
-    // sent the Shutdown message to terminate the schedulers.
-    let mut handles = ~[];
-
-    for worker in workers.move_iter() {
-        rtdebug!("inserting a regular scheduler");
-
-        // Every scheduler is driven by an I/O event loop.
-        let loop_ = new_event_loop();
-        let mut sched = ~Scheduler::new(loop_,
-                                        worker,
-                                        stealers.clone(),
-                                        sleepers.clone());
-        let handle = sched.make_handle();
-
-        scheds.push(sched);
-        handles.push(handle);
-    }
-
-    // If we need a main-thread task then create a main thread scheduler
-    // that will reject any task that isn't pinned to it
-    let main_sched = if use_main_sched {
-
-        // Create a friend handle.
-        let mut friend_sched = scheds.pop();
-        let friend_handle = friend_sched.make_handle();
-        scheds.push(friend_sched);
-
-        // This scheduler needs a queue that isn't part of the stealee
-        // set.
-        let (worker, _) = pool.deque();
-
-        let main_loop = new_event_loop();
-        let mut main_sched = ~Scheduler::new_special(main_loop,
-                                                     worker,
-                                                     stealers.clone(),
-                                                     sleepers.clone(),
-                                                     false,
-                                                     Some(friend_handle));
-        let mut main_handle = main_sched.make_handle();
-        // Allow the scheduler to exit when the main task exits.
-        // Note: sending the shutdown message also prevents the scheduler
-        // from pushing itself to the sleeper list, which is used for
-        // waking up schedulers for work stealing; since this is a
-        // non-work-stealing scheduler it should not be adding itself
-        // to the list.
-        main_handle.send(Shutdown);
-        Some(main_sched)
-    } else {
-        None
-    };
-
-    // Create a shared cell for transmitting the process exit
-    // code from the main task to this function.
-    let exit_code = UnsafeArc::new(AtomicInt::new(0));
-    let exit_code_clone = exit_code.clone();
-
-    // Used to sanity check that the runtime only exits once
-    let exited_already = UnsafeArc::new(AtomicBool::new(false));
-
-    // When the main task exits, after all the tasks in the main
-    // task tree, shut down the schedulers and set the exit code.
-    let handles = handles;
-    let on_exit: proc(TaskResult) = proc(exit_success) {
-        unsafe {
-            assert!(!(*exited_already.get()).swap(true, SeqCst),
-                    "the runtime already exited");
-        }
-
-        let mut handles = handles;
-        for handle in handles.mut_iter() {
-            handle.send(Shutdown);
-        }
-
-        unsafe {
-            let exit_code = if exit_success.is_ok() {
-                use rt::util;
-
-                // If we're exiting successfully, then return the global
-                // exit status, which can be set programmatically.
-                util::get_exit_status()
-            } else {
-                DEFAULT_ERROR_CODE
-            };
-            (*exit_code_clone.get()).store(exit_code, SeqCst);
-        }
-    };
-
-    let mut threads = ~[];
-    let mut on_exit = Some(on_exit);
-
-    if !use_main_sched {
-
-        // In the case where we do not use a main_thread scheduler we
-        // run the main task in one of our threads.
-
-        let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool,
-                                            None,
-                                            ::util::replace(&mut main,
-                                                            None).unwrap());
-        main_task.name = Some(SendStrStatic("<main>"));
-        main_task.death.on_exit = ::util::replace(&mut on_exit, None);
-
-        let sched = scheds.pop();
-        let main_task = main_task;
-        let thread = do Thread::start {
-            sched.bootstrap(main_task);
-        };
-        threads.push(thread);
-    }
-
-    // Run each remaining scheduler in a thread.
-    for sched in scheds.move_rev_iter() {
-        rtdebug!("creating regular schedulers");
-        let thread = do Thread::start {
-            let mut sched = sched;
-            let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
-                rtdebug!("boostraping a non-primary scheduler");
-            };
-            sched.bootstrap(bootstrap_task);
-        };
-        threads.push(thread);
-    }
-
-    // If we do have a main thread scheduler, run it now.
-
-    if use_main_sched {
-        rtdebug!("about to create the main scheduler task");
-
-        let mut main_sched = main_sched.unwrap();
-
-        let home = Sched(main_sched.make_handle());
-        let mut main_task = ~Task::new_root_homed(&mut main_sched.stack_pool,
-                                                  None,
-                                                  home,
-                                                  ::util::replace(&mut main,
-                                                                  None).
-                                                                  unwrap());
-        main_task.name = Some(SendStrStatic("<main>"));
-        main_task.death.on_exit = ::util::replace(&mut on_exit, None);
-        rtdebug!("bootstrapping main_task");
-
-        main_sched.bootstrap(main_task);
-    }
-
-    rtdebug!("waiting for threads");
-
-    // Wait for schedulers
-    for thread in threads.move_iter() {
-        thread.join();
-    }
-
-    // Return the exit code
-    unsafe {
-        (*exit_code.get()).load(SeqCst)
-    }
-}
-
-pub fn in_sched_context() -> bool {
-    unsafe {
-        let task_ptr: Option<*mut Task> = Local::try_unsafe_borrow();
-        match task_ptr {
-            Some(task) => {
-                match (*task).task_type {
-                    SchedTask => true,
-                    _ => false
-                }
-            }
-            None => false
-        }
-    }
-}
-
-pub fn in_green_task_context() -> bool {
-    unsafe {
-        let task: Option<*mut Task> = Local::try_unsafe_borrow();
-        match task {
-            Some(task) => {
-                match (*task).task_type {
-                    GreenTask(_) => true,
-                    _ => false
-                }
-            }
-            None => false
-        }
-    }
-}
-
-pub fn new_event_loop() -> ~rtio::EventLoop {
-    match crate_map::get_crate_map() {
-        None => {}
-        Some(map) => {
-            match map.event_loop_factory {
-                None => {}
-                Some(factory) => return factory()
-            }
-        }
-    }
-
-    // If the crate map didn't specify a factory to create an event loop, then
-    // instead just use a basic event loop missing all I/O services to at least
-    // get the scheduler running.
-    return basic::event_loop();
-}
diff --git a/src/libstd/rt/rc.rs b/src/libstd/rt/rc.rs
deleted file mode 100644
index 2699dab6d38..00000000000
--- a/src/libstd/rt/rc.rs
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! An owned, task-local, reference counted type
-//!
-//! # Safety note
-//!
-//! XXX There is currently no type-system mechanism for enforcing that
-//! reference counted types are both allocated on the exchange heap
-//! and also non-sendable
-//!
-//! This doesn't prevent borrowing multiple aliasable mutable pointers
-
-use ops::Drop;
-use clone::Clone;
-use libc::c_void;
-use cast;
-
-pub struct RC<T> {
-    priv p: *c_void // ~(uint, T)
-}
-
-impl<T> RC<T> {
-    pub fn new(val: T) -> RC<T> {
-        unsafe {
-            let v = ~(1, val);
-            let p: *c_void = cast::transmute(v);
-            RC { p: p }
-        }
-    }
-
-    fn get_mut_state(&mut self) -> *mut (uint, T) {
-        unsafe {
-            let p: &mut ~(uint, T) = cast::transmute(&mut self.p);
-            let p: *mut (uint, T) = &mut **p;
-            return p;
-        }
-    }
-
-    fn get_state(&self) -> *(uint, T) {
-        unsafe {
-            let p: &~(uint, T) = cast::transmute(&self.p);
-            let p: *(uint, T) = &**p;
-            return p;
-        }
-    }
-
-    pub fn unsafe_borrow_mut(&mut self) -> *mut T {
-        unsafe {
-            match *self.get_mut_state() {
-                (_, ref mut p) => {
-                    let p: *mut T = p;
-                    return p;
-                }
-            }
-        }
-    }
-
-    pub fn refcount(&self) -> uint {
-        unsafe {
-            match *self.get_state() {
-                (count, _) => count
-            }
-        }
-    }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for RC<T> {
-    fn drop(&mut self) {
-        assert!(self.refcount() > 0);
-
-        unsafe {
-            match *self.get_mut_state() {
-                (ref mut count, _) => {
-                    *count = *count - 1
-                }
-            }
-
-            if self.refcount() == 0 {
-                let _: ~(uint, T) = cast::transmute(self.p);
-            }
-        }
-    }
-}
-
-impl<T> Clone for RC<T> {
-    fn clone(&self) -> RC<T> {
-        unsafe {
-            // XXX: Mutable clone
-            let this: &mut RC<T> = cast::transmute_mut(self);
-
-            match *this.get_mut_state() {
-                (ref mut count, _) => {
-                    *count = *count + 1;
-                }
-            }
-        }
-
-        RC { p: self.p }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::RC;
-
-    #[test]
-    fn smoke_test() {
-        unsafe {
-            let mut v1 = RC::new(100);
-            assert!(*v1.unsafe_borrow_mut() == 100);
-            assert!(v1.refcount() == 1);
-
-            let mut v2 = v1.clone();
-            assert!(*v2.unsafe_borrow_mut() == 100);
-            assert!(v2.refcount() == 2);
-
-            *v2.unsafe_borrow_mut() = 200;
-            assert!(*v2.unsafe_borrow_mut() == 200);
-            assert!(*v1.unsafe_borrow_mut() == 200);
-
-            let v3 = v2.clone();
-            assert!(v3.refcount() == 3);
-            {
-                let _v1 = v1;
-                let _v2 = v2;
-            }
-            assert!(v3.refcount() == 1);
-        }
-    }
-}
diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs
index b54231421e3..6b3d50a76ac 100644
--- a/src/libstd/rt/rtio.rs
+++ b/src/libstd/rt/rtio.rs
@@ -14,14 +14,15 @@ use comm::{SharedChan, Port};
 use libc::c_int;
 use libc;
 use ops::Drop;
-use option::*;
+use option::{Option, Some, None};
 use path::Path;
-use result::*;
+use result::{Result, Ok, Err};
+use rt::task::Task;
+use rt::local::Local;
 
 use ai = io::net::addrinfo;
+use io;
 use io::IoError;
-use io::native::NATIVE_IO_FACTORY;
-use io::native;
 use io::net::ip::{IpAddr, SocketAddr};
 use io::process::{ProcessConfig, ProcessExit};
 use io::signal::Signum;
@@ -93,36 +94,52 @@ impl<'a> Drop for LocalIo<'a> {
 impl<'a> LocalIo<'a> {
     /// Returns the local I/O: either the local scheduler's I/O services or
     /// the native I/O services.
-    pub fn borrow() -> LocalIo {
-        use rt::sched::Scheduler;
-        use rt::local::Local;
+    pub fn borrow() -> Option<LocalIo> {
+        // FIXME(#11053): bad
+        //
+        // This is currently very unsafely implemented. We don't actually
+        // *take* the local I/O so there's a very real possibility that we
+        // can have two borrows at once. Currently there is not a clear way
+        // to actually borrow the local I/O factory safely because even if
+        // ownership were transferred down to the functions that the I/O
+        // factory implements it's just too much of a pain to know when to
+        // relinquish ownership back into the local task (but that would be
+        // the safe way of implementing this function).
+        //
+        // In order to get around this, we just transmute a copy out of the task
+        // in order to have what is likely a static lifetime (bad).
+        let mut t: ~Task = Local::take();
+        let ret = t.local_io().map(|t| {
+            unsafe { cast::transmute_copy(&t) }
+        });
+        Local::put(t);
+        return ret;
+    }
 
-        unsafe {
-            // First, attempt to use the local scheduler's I/O services
-            let sched: Option<*mut Scheduler> = Local::try_unsafe_borrow();
-            match sched {
-                Some(sched) => {
-                    match (*sched).event_loop.io() {
-                        Some(factory) => {
-                            return LocalIo {
-                                factory: factory,
-                            }
-                        }
-                        None => {}
+    pub fn maybe_raise<T>(f: |io: &mut IoFactory| -> Result<T, IoError>)
+        -> Option<T>
+    {
+        match LocalIo::borrow() {
+            None => {
+                io::io_error::cond.raise(io::standard_error(io::IoUnavailable));
+                None
+            }
+            Some(mut io) => {
+                match f(io.get()) {
+                    Ok(t) => Some(t),
+                    Err(ioerr) => {
+                        io::io_error::cond.raise(ioerr);
+                        None
                     }
                 }
-                None => {}
-            }
-            // If we don't have a scheduler or the scheduler doesn't have I/O
-            // services, then fall back to the native I/O services.
-            let native_io: &'static mut native::IoFactory =
-                &mut NATIVE_IO_FACTORY;
-            LocalIo {
-                factory: native_io as &mut IoFactory:'static
             }
         }
     }
 
+    pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> {
+        LocalIo { factory: io }
+    }
+
     /// Returns the underlying I/O factory as a trait reference.
     #[inline]
     pub fn get<'a>(&'a mut self) -> &'a mut IoFactory {
diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs
deleted file mode 100644
index 15aa1602cd0..00000000000
--- a/src/libstd/rt/sched.rs
+++ /dev/null
@@ -1,1395 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use option::{Option, Some, None};
-use cast::{transmute, transmute_mut_region, transmute_mut_unsafe};
-use clone::Clone;
-use unstable::raw;
-use super::sleeper_list::SleeperList;
-use super::stack::{StackPool};
-use super::rtio::EventLoop;
-use super::context::Context;
-use super::task::{Task, AnySched, Sched};
-use rt::kill::BlockedTask;
-use rt::deque;
-use rt::local_ptr;
-use rt::local::Local;
-use rt::rtio::{RemoteCallback, PausableIdleCallback, Callback};
-use borrow::{to_uint};
-use rand::{XorShiftRng, Rng, Rand};
-use iter::range;
-use unstable::mutex::Mutex;
-use vec::{OwnedVector};
-
-use mpsc = super::mpsc_queue;
-
-/// A scheduler is responsible for coordinating the execution of Tasks
-/// on a single thread. The scheduler runs inside a slightly modified
-/// Rust Task. When not running this task is stored in the scheduler
-/// struct. The scheduler struct acts like a baton, all scheduling
-/// actions are transfers of the baton.
-///
-/// XXX: This creates too many callbacks to run_sched_once, resulting
-/// in too much allocation and too many events.
-pub struct Scheduler {
-    /// There are N work queues, one per scheduler.
-    work_queue: deque::Worker<~Task>,
-    /// Work queues for the other schedulers. These are created by
-    /// cloning the core work queues.
-    work_queues: ~[deque::Stealer<~Task>],
-    /// The queue of incoming messages from other schedulers.
-    /// These are enqueued by SchedHandles after which a remote callback
-    /// is triggered to handle the message.
-    message_queue: mpsc::Consumer<SchedMessage, ()>,
-    /// Producer used to clone sched handles from
-    message_producer: mpsc::Producer<SchedMessage, ()>,
-    /// A shared list of sleeping schedulers. We'll use this to wake
-    /// up schedulers when pushing work onto the work queue.
-    sleeper_list: SleeperList,
-    /// Indicates that we have previously pushed a handle onto the
-    /// SleeperList but have not yet received the Wake message.
-    /// Being `true` does not necessarily mean that the scheduler is
-    /// not active since there are multiple event sources that may
-    /// wake the scheduler. It just prevents the scheduler from pushing
-    /// multiple handles onto the sleeper list.
-    sleepy: bool,
-    /// A flag to indicate we've received the shutdown message and should
-    /// no longer try to go to sleep, but exit instead.
-    no_sleep: bool,
-    stack_pool: StackPool,
-    /// The scheduler runs on a special task. When it is not running
-    /// it is stored here instead of the work queue.
-    sched_task: Option<~Task>,
-    /// An action performed after a context switch on behalf of the
-    /// code running before the context switch
-    cleanup_job: Option<CleanupJob>,
-    /// Should this scheduler run any task, or only pinned tasks?
-    run_anything: bool,
-    /// If the scheduler shouldn't run some tasks, a friend to send
-    /// them to.
-    friend_handle: Option<SchedHandle>,
-    /// A fast XorShift rng for scheduler use
-    rng: XorShiftRng,
-    /// A togglable idle callback
-    idle_callback: Option<~PausableIdleCallback>,
-    /// A countdown that starts at a random value and is decremented
-    /// every time a yield check is performed. When it hits 0 a task
-    /// will yield.
-    yield_check_count: uint,
-    /// A flag to tell the scheduler loop it needs to do some stealing
-    /// in order to introduce randomness as part of a yield
-    steal_for_yield: bool,
-
-    // n.b. currently destructors of an object are run in top-to-bottom in order
-    //      of field declaration. Due to its nature, the pausable idle callback
-    //      must have some sort of handle to the event loop, so it needs to get
-    //      destroyed before the event loop itself. For this reason, we destroy
-    //      the event loop last to ensure that any unsafe references to it are
-    //      destroyed before it's actually destroyed.
-
-    /// The event loop used to drive the scheduler and perform I/O
-    event_loop: ~EventLoop,
-}
-
-/// An indication of how hard to work on a given operation, the difference
-/// mainly being whether memory is synchronized or not
-#[deriving(Eq)]
-enum EffortLevel {
-    DontTryTooHard,
-    GiveItYourBest
-}
-
-static MAX_YIELD_CHECKS: uint = 20000;
-
-fn reset_yield_check(rng: &mut XorShiftRng) -> uint {
-    let r: uint = Rand::rand(rng);
-    r % MAX_YIELD_CHECKS + 1
-}
-
-impl Scheduler {
-
-    // * Initialization Functions
-
-    pub fn new(event_loop: ~EventLoop,
-               work_queue: deque::Worker<~Task>,
-               work_queues: ~[deque::Stealer<~Task>],
-               sleeper_list: SleeperList)
-        -> Scheduler {
-
-        Scheduler::new_special(event_loop, work_queue,
-                               work_queues,
-                               sleeper_list, true, None)
-
-    }
-
-    pub fn new_special(event_loop: ~EventLoop,
-                       work_queue: deque::Worker<~Task>,
-                       work_queues: ~[deque::Stealer<~Task>],
-                       sleeper_list: SleeperList,
-                       run_anything: bool,
-                       friend: Option<SchedHandle>)
-        -> Scheduler {
-
-        let (consumer, producer) = mpsc::queue(());
-        let mut sched = Scheduler {
-            sleeper_list: sleeper_list,
-            message_queue: consumer,
-            message_producer: producer,
-            sleepy: false,
-            no_sleep: false,
-            event_loop: event_loop,
-            work_queue: work_queue,
-            work_queues: work_queues,
-            stack_pool: StackPool::new(),
-            sched_task: None,
-            cleanup_job: None,
-            run_anything: run_anything,
-            friend_handle: friend,
-            rng: new_sched_rng(),
-            idle_callback: None,
-            yield_check_count: 0,
-            steal_for_yield: false
-        };
-
-        sched.yield_check_count = reset_yield_check(&mut sched.rng);
-
-        return sched;
-    }
-
-    // XXX: This may eventually need to be refactored so that
-    // the scheduler itself doesn't have to call event_loop.run.
-    // That will be important for embedding the runtime into external
-    // event loops.
-
-    // Take a main task to run, and a scheduler to run it in. Create a
-    // scheduler task and bootstrap into it.
-    pub fn bootstrap(mut ~self, task: ~Task) {
-
-        // Build an Idle callback.
-        let cb = ~SchedRunner as ~Callback;
-        self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb));
-
-        // Initialize the TLS key.
-        local_ptr::init();
-
-        // Create a task for the scheduler with an empty context.
-        let sched_task = ~Task::new_sched_task();
-
-        // Now that we have an empty task struct for the scheduler
-        // task, put it in TLS.
-        Local::put(sched_task);
-
-        // Before starting our first task, make sure the idle callback
-        // is active. As we do not start in the sleep state this is
-        // important.
-        self.idle_callback.get_mut_ref().resume();
-
-        // Now, as far as all the scheduler state is concerned, we are
-        // inside the "scheduler" context. So we can act like the
-        // scheduler and resume the provided task.
-        self.resume_task_immediately(task);
-
-        // Now we are back in the scheduler context, having
-        // successfully run the input task. Start by running the
-        // scheduler. Grab it out of TLS - performing the scheduler
-        // action will have given it away.
-        let sched: ~Scheduler = Local::take();
-
-        rtdebug!("starting scheduler {}", sched.sched_id());
-        sched.run();
-
-        // Close the idle callback.
-        let mut sched: ~Scheduler = Local::take();
-        sched.idle_callback.take();
-        // Make one go through the loop to run the close callback.
-        sched.run();
-
-        // Now that we are done with the scheduler, clean up the
-        // scheduler task. Do so by removing it from TLS and manually
-        // cleaning up the memory it uses. As we didn't actually call
-        // task.run() on the scheduler task we never get through all
-        // the cleanup code it runs.
-        let mut stask: ~Task = Local::take();
-
-        rtdebug!("stopping scheduler {}", stask.sched.get_ref().sched_id());
-
-        // Should not have any messages
-        let message = stask.sched.get_mut_ref().message_queue.pop();
-        rtassert!(match message { mpsc::Empty => true, _ => false });
-
-        stask.destroyed = true;
-    }
-
-    // This does not return a scheduler, as the scheduler is placed
-    // inside the task.
-    pub fn run(mut ~self) {
-
-        // This is unsafe because we need to place the scheduler, with
-        // the event_loop inside, inside our task. But we still need a
-        // mutable reference to the event_loop to give it the "run"
-        // command.
-        unsafe {
-            let event_loop: *mut ~EventLoop = &mut self.event_loop;
-
-            {
-                // Our scheduler must be in the task before the event loop
-                // is started.
-                let mut stask = Local::borrow(None::<Task>);
-                stask.get().sched = Some(self);
-            }
-
-            (*event_loop).run();
-        }
-    }
-
-    // * Execution Functions - Core Loop Logic
-
-    // The model for this function is that you continue through it
-    // until you either use the scheduler while performing a schedule
-    // action, in which case you give it away and return early, or
-    // you reach the end and sleep. In the case that a scheduler
-    // action is performed the loop is evented such that this function
-    // is called again.
-    fn run_sched_once() {
-
-        // When we reach the scheduler context via the event loop we
-        // already have a scheduler stored in our local task, so we
-        // start off by taking it. This is the only path through the
-        // scheduler where we get the scheduler this way.
-        let mut sched: ~Scheduler = Local::take();
-
-        // Assume that we need to continue idling unless we reach the
-        // end of this function without performing an action.
-        sched.idle_callback.get_mut_ref().resume();
-
-        // First we check for scheduler messages, these are higher
-        // priority than regular tasks.
-        let sched = match sched.interpret_message_queue(DontTryTooHard) {
-            Some(sched) => sched,
-            None => return
-        };
-
-        // This helper will use a randomized work-stealing algorithm
-        // to find work.
-        let sched = match sched.do_work() {
-            Some(sched) => sched,
-            None => return
-        };
-
-        // Now, before sleeping we need to find out if there really
-        // were any messages. Give it your best!
-        let mut sched = match sched.interpret_message_queue(GiveItYourBest) {
-            Some(sched) => sched,
-            None => return
-        };
-
-        // If we got here then there was no work to do.
-        // Generate a SchedHandle and push it to the sleeper list so
-        // somebody can wake us up later.
-        if !sched.sleepy && !sched.no_sleep {
-            rtdebug!("scheduler has no work to do, going to sleep");
-            sched.sleepy = true;
-            let handle = sched.make_handle();
-            sched.sleeper_list.push(handle);
-            // Since we are sleeping, deactivate the idle callback.
-            sched.idle_callback.get_mut_ref().pause();
-        } else {
-            rtdebug!("not sleeping, already doing so or no_sleep set");
-            // We may not be sleeping, but we still need to deactivate
-            // the idle callback.
-            sched.idle_callback.get_mut_ref().pause();
-        }
-
-        // Finished a cycle without using the Scheduler. Place it back
-        // in TLS.
-        Local::put(sched);
-    }
-
-    // This function returns None if the scheduler is "used", or it
-    // returns the still-available scheduler. At this point all
-    // message-handling will count as a turn of work, and as a result
-    // return None.
-    fn interpret_message_queue(mut ~self, effort: EffortLevel) -> Option<~Scheduler> {
-
-        let msg = if effort == DontTryTooHard {
-            self.message_queue.casual_pop()
-        } else {
-            // When popping our message queue, we could see an "inconsistent"
-            // state which means that we *should* be able to pop data, but we
-            // are unable to at this time. Our options are:
-            //
-            //  1. Spin waiting for data
-            //  2. Ignore this and pretend we didn't find a message
-            //
-            // If we choose route 1, then if the pusher in question is currently
-            // pre-empted, we're going to take up our entire time slice just
-            // spinning on this queue. If we choose route 2, then the pusher in
-            // question is still guaranteed to make a send() on its async
-            // handle, so we will guaranteed wake up and see its message at some
-            // point.
-            //
-            // I have chosen to take route #2.
-            match self.message_queue.pop() {
-                mpsc::Data(t) => Some(t),
-                mpsc::Empty | mpsc::Inconsistent => None
-            }
-        };
-
-        match msg {
-            Some(PinnedTask(task)) => {
-                let mut task = task;
-                task.give_home(Sched(self.make_handle()));
-                self.resume_task_immediately(task);
-                return None;
-            }
-            Some(TaskFromFriend(task)) => {
-                rtdebug!("got a task from a friend. lovely!");
-                self.process_task(task, Scheduler::resume_task_immediately_cl);
-                return None;
-            }
-            Some(RunOnce(task)) => {
-                // bypass the process_task logic to force running this task once
-                // on this home scheduler. This is often used for I/O (homing).
-                Scheduler::resume_task_immediately_cl(self, task);
-                return None;
-            }
-            Some(Wake) => {
-                self.sleepy = false;
-                Local::put(self);
-                return None;
-            }
-            Some(Shutdown) => {
-                rtdebug!("shutting down");
-                if self.sleepy {
-                    // There may be an outstanding handle on the
-                    // sleeper list.  Pop them all to make sure that's
-                    // not the case.
-                    loop {
-                        match self.sleeper_list.pop() {
-                            Some(handle) => {
-                                let mut handle = handle;
-                                handle.send(Wake);
-                            }
-                            None => break
-                        }
-                    }
-                }
-                // No more sleeping. After there are no outstanding
-                // event loop references we will shut down.
-                self.no_sleep = true;
-                self.sleepy = false;
-                Local::put(self);
-                return None;
-            }
-            None => {
-                return Some(self);
-            }
-        }
-    }
-
-    fn do_work(mut ~self) -> Option<~Scheduler> {
-        rtdebug!("scheduler calling do work");
-        match self.find_work() {
-            Some(task) => {
-                rtdebug!("found some work! processing the task");
-                self.process_task(task, Scheduler::resume_task_immediately_cl);
-                return None;
-            }
-            None => {
-                rtdebug!("no work was found, returning the scheduler struct");
-                return Some(self);
-            }
-        }
-    }
-
-    // Workstealing: In this iteration of the runtime each scheduler
-    // thread has a distinct work queue. When no work is available
-    // locally, make a few attempts to steal work from the queues of
-    // other scheduler threads. If a few steals fail we end up in the
-    // old "no work" path which is fine.
-
-    // First step in the process is to find a task. This function does
-    // that by first checking the local queue, and if there is no work
-    // there, trying to steal from the remote work queues.
-    fn find_work(&mut self) -> Option<~Task> {
-        rtdebug!("scheduler looking for work");
-        if !self.steal_for_yield {
-            match self.work_queue.pop() {
-                Some(task) => {
-                    rtdebug!("found a task locally");
-                    return Some(task)
-                }
-                None => {
-                    rtdebug!("scheduler trying to steal");
-                    return self.try_steals();
-                }
-            }
-        } else {
-            // During execution of the last task, it performed a 'yield',
-            // so we're doing some work stealing in order to introduce some
-            // scheduling randomness. Otherwise we would just end up popping
-            // that same task again. This is pretty lame and is to work around
-            // the problem that work stealing is not designed for 'non-strict'
-            // (non-fork-join) task parallelism.
-            self.steal_for_yield = false;
-            match self.try_steals() {
-                Some(task) => {
-                    rtdebug!("stole a task after yielding");
-                    return Some(task);
-                }
-                None => {
-                    rtdebug!("did not steal a task after yielding");
-                    // Back to business
-                    return self.find_work();
-                }
-            }
-        }
-    }
-
-    // Try stealing from all queues the scheduler knows about. This
-    // naive implementation can steal from our own queue or from other
-    // special schedulers.
-    fn try_steals(&mut self) -> Option<~Task> {
-        let work_queues = &mut self.work_queues;
-        let len = work_queues.len();
-        let start_index = self.rng.gen_range(0, len);
-        for index in range(0, len).map(|i| (i + start_index) % len) {
-            match work_queues[index].steal() {
-                deque::Data(task) => {
-                    rtdebug!("found task by stealing");
-                    return Some(task)
-                }
-                _ => ()
-            }
-        };
-        rtdebug!("giving up on stealing");
-        return None;
-    }
-
-    // * Task Routing Functions - Make sure tasks send up in the right
-    // place.
-
-    fn process_task(mut ~self, mut task: ~Task, schedule_fn: SchedulingFn) {
-        rtdebug!("processing a task");
-
-        let home = task.take_unwrap_home();
-        match home {
-            Sched(home_handle) => {
-                if home_handle.sched_id != self.sched_id() {
-                    rtdebug!("sending task home");
-                    task.give_home(Sched(home_handle));
-                    Scheduler::send_task_home(task);
-                    Local::put(self);
-                } else {
-                    rtdebug!("running task here");
-                    task.give_home(Sched(home_handle));
-                    schedule_fn(self, task);
-                }
-            }
-            AnySched if self.run_anything => {
-                rtdebug!("running anysched task here");
-                task.give_home(AnySched);
-                schedule_fn(self, task);
-            }
-            AnySched => {
-                rtdebug!("sending task to friend");
-                task.give_home(AnySched);
-                self.send_to_friend(task);
-                Local::put(self);
-            }
-        }
-    }
-
-    fn send_task_home(task: ~Task) {
-        let mut task = task;
-        let mut home = task.take_unwrap_home();
-        match home {
-            Sched(ref mut home_handle) => {
-                home_handle.send(PinnedTask(task));
-            }
-            AnySched => {
-                        rtabort!("error: cannot send anysched task home");
-            }
-        }
-    }
-
-    /// Take a non-homed task we aren't allowed to run here and send
-    /// it to the designated friend scheduler to execute.
-    fn send_to_friend(&mut self, task: ~Task) {
-        rtdebug!("sending a task to friend");
-        match self.friend_handle {
-            Some(ref mut handle) => {
-                handle.send(TaskFromFriend(task));
-            }
-            None => {
-                rtabort!("tried to send task to a friend but scheduler has no friends");
-            }
-        }
-    }
-
-    /// Schedule a task to be executed later.
-    ///
-    /// Pushes the task onto the work stealing queue and tells the
-    /// event loop to run it later. Always use this instead of pushing
-    /// to the work queue directly.
-    pub fn enqueue_task(&mut self, task: ~Task) {
-
-        // We push the task onto our local queue clone.
-        self.work_queue.push(task);
-        self.idle_callback.get_mut_ref().resume();
-
-        // We've made work available. Notify a
-        // sleeping scheduler.
-
-        match self.sleeper_list.casual_pop() {
-            Some(handle) => {
-                let mut handle = handle;
-                handle.send(Wake)
-            }
-            None => { (/* pass */) }
-        };
-    }
-
-    /// As enqueue_task, but with the possibility for the blocked task to
-    /// already have been killed.
-    pub fn enqueue_blocked_task(&mut self, blocked_task: BlockedTask) {
-        blocked_task.wake().map(|task| self.enqueue_task(task));
-    }
-
-    // * Core Context Switching Functions
-
-    // The primary function for changing contexts. In the current
-    // design the scheduler is just a slightly modified GreenTask, so
-    // all context swaps are from Task to Task. The only difference
-    // between the various cases is where the inputs come from, and
-    // what is done with the resulting task. That is specified by the
-    // cleanup function f, which takes the scheduler and the
-    // old task as inputs.
-
-    pub fn change_task_context(mut ~self,
-                               next_task: ~Task,
-                               f: |&mut Scheduler, ~Task|) {
-        // The current task is grabbed from TLS, not taken as an input.
-        // Doing an unsafe_take to avoid writing back a null pointer -
-        // We're going to call `put` later to do that.
-        let current_task: ~Task = unsafe { Local::unsafe_take() };
-
-        // Check that the task is not in an atomically() section (e.g.,
-        // holding a pthread mutex, which could deadlock the scheduler).
-        current_task.death.assert_may_sleep();
-
-        // These transmutes do something fishy with a closure.
-        let f_fake_region = unsafe {
-            transmute::<|&mut Scheduler, ~Task|,
-                        |&mut Scheduler, ~Task|>(f)
-        };
-        let f_opaque = ClosureConverter::from_fn(f_fake_region);
-
-        // The current task is placed inside an enum with the cleanup
-        // function. This enum is then placed inside the scheduler.
-        self.cleanup_job = Some(CleanupJob::new(current_task, f_opaque));
-
-        // The scheduler is then placed inside the next task.
-        let mut next_task = next_task;
-        next_task.sched = Some(self);
-
-        // However we still need an internal mutable pointer to the
-        // original task. The strategy here was "arrange memory, then
-        // get pointers", so we crawl back up the chain using
-        // transmute to eliminate borrowck errors.
-        unsafe {
-
-            let sched: &mut Scheduler =
-                transmute_mut_region(*next_task.sched.get_mut_ref());
-
-            let current_task: &mut Task = match sched.cleanup_job {
-                Some(CleanupJob { task: ref task, .. }) => {
-                    let task_ptr: *~Task = task;
-                    transmute_mut_region(*transmute_mut_unsafe(task_ptr))
-                }
-                None => {
-                    rtabort!("no cleanup job");
-                }
-            };
-
-            let (current_task_context, next_task_context) =
-                Scheduler::get_contexts(current_task, next_task);
-
-            // Done with everything - put the next task in TLS. This
-            // works because due to transmute the borrow checker
-            // believes that we have no internal pointers to
-            // next_task.
-            Local::put(next_task);
-
-            // The raw context swap operation. The next action taken
-            // will be running the cleanup job from the context of the
-            // next task.
-            Context::swap(current_task_context, next_task_context);
-        }
-
-        // When the context swaps back to this task we immediately
-        // run the cleanup job, as expected by the previously called
-        // swap_contexts function.
-        unsafe {
-            let task: *mut Task = Local::unsafe_borrow();
-            (*task).sched.get_mut_ref().run_cleanup_job();
-
-            // See the comments in switch_running_tasks_and_then for why a lock
-            // is acquired here. This is the resumption points and the "bounce"
-            // that it is referring to.
-            (*task).nasty_deschedule_lock.lock();
-            (*task).nasty_deschedule_lock.unlock();
-        }
-    }
-
-    // Returns a mutable reference to both contexts involved in this
-    // swap. This is unsafe - we are getting mutable internal
-    // references to keep even when we don't own the tasks. It looks
-    // kinda safe because we are doing transmutes before passing in
-    // the arguments.
-    pub fn get_contexts<'a>(current_task: &mut Task, next_task: &mut Task) ->
-        (&'a mut Context, &'a mut Context) {
-        let current_task_context =
-            &mut current_task.coroutine.get_mut_ref().saved_context;
-        let next_task_context =
-                &mut next_task.coroutine.get_mut_ref().saved_context;
-        unsafe {
-            (transmute_mut_region(current_task_context),
-             transmute_mut_region(next_task_context))
-        }
-    }
-
-    // * Context Swapping Helpers - Here be ugliness!
-
-    pub fn resume_task_immediately(~self, task: ~Task) {
-        self.change_task_context(task, |sched, stask| {
-            sched.sched_task = Some(stask);
-        })
-    }
-
-    fn resume_task_immediately_cl(sched: ~Scheduler,
-                                  task: ~Task) {
-        sched.resume_task_immediately(task)
-    }
-
-
-    pub fn resume_blocked_task_immediately(~self, blocked_task: BlockedTask) {
-        match blocked_task.wake() {
-            Some(task) => { self.resume_task_immediately(task); }
-            None => Local::put(self)
-        };
-    }
-
-    /// Block a running task, context switch to the scheduler, then pass the
-    /// blocked task to a closure.
-    ///
-    /// # Safety note
-    ///
-    /// The closure here is a *stack* closure that lives in the
-    /// running task.  It gets transmuted to the scheduler's lifetime
-    /// and called while the task is blocked.
-    ///
-    /// This passes a Scheduler pointer to the fn after the context switch
-    /// in order to prevent that fn from performing further scheduling operations.
-    /// Doing further scheduling could easily result in infinite recursion.
-    ///
-    /// Note that if the closure provided relinquishes ownership of the
-    /// BlockedTask, then it is possible for the task to resume execution before
-    /// the closure has finished executing. This would naturally introduce a
-    /// race if the closure and task shared portions of the environment.
-    ///
-    /// This situation is currently prevented, or in other words it is
-    /// guaranteed that this function will not return before the given closure
-    /// has returned.
-    pub fn deschedule_running_task_and_then(mut ~self,
-                                            f: |&mut Scheduler, BlockedTask|) {
-        // Trickier - we need to get the scheduler task out of self
-        // and use it as the destination.
-        let stask = self.sched_task.take_unwrap();
-        // Otherwise this is the same as below.
-        self.switch_running_tasks_and_then(stask, f);
-    }
-
-    pub fn switch_running_tasks_and_then(~self, next_task: ~Task,
-                                         f: |&mut Scheduler, BlockedTask|) {
-        // And here comes one of the sad moments in which a lock is used in a
-        // core portion of the rust runtime. As always, this is highly
-        // undesirable, so there's a good reason behind it.
-        //
-        // There is an excellent outline of the problem in issue #8132, and it's
-        // summarized in that `f` is executed on a sched task, but its
-        // environment is on the previous task. If `f` relinquishes ownership of
-        // the BlockedTask, then it may introduce a race where `f` is using the
-        // environment as well as the code after the 'deschedule' block.
-        //
-        // The solution we have chosen to adopt for now is to acquire a
-        // task-local lock around this block. The resumption of the task in
-        // context switching will bounce on the lock, thereby waiting for this
-        // block to finish, eliminating the race mentioned above.
-        //
-        // To actually maintain a handle to the lock, we use an unsafe pointer
-        // to it, but we're guaranteed that the task won't exit until we've
-        // unlocked the lock so there's no worry of this memory going away.
-        self.change_task_context(next_task, |sched, mut task| {
-            let lock: *mut Mutex = &mut task.nasty_deschedule_lock;
-            unsafe { (*lock).lock() }
-            f(sched, BlockedTask::block(task));
-            unsafe { (*lock).unlock() }
-        })
-    }
-
-    fn switch_task(sched: ~Scheduler, task: ~Task) {
-        sched.switch_running_tasks_and_then(task, |sched, last_task| {
-            sched.enqueue_blocked_task(last_task);
-        });
-    }
-
-    // * Task Context Helpers
-
-    /// Called by a running task to end execution, after which it will
-    /// be recycled by the scheduler for reuse in a new task.
-    pub fn terminate_current_task(mut ~self) {
-        // Similar to deschedule running task and then, but cannot go through
-        // the task-blocking path. The task is already dying.
-        let stask = self.sched_task.take_unwrap();
-        self.change_task_context(stask, |sched, mut dead_task| {
-            let coroutine = dead_task.coroutine.take_unwrap();
-            coroutine.recycle(&mut sched.stack_pool);
-        })
-    }
-
-    pub fn run_task(task: ~Task) {
-        let sched: ~Scheduler = Local::take();
-        sched.process_task(task, Scheduler::switch_task);
-    }
-
-    pub fn run_task_later(next_task: ~Task) {
-        let mut sched = Local::borrow(None::<Scheduler>);
-        sched.get().enqueue_task(next_task);
-    }
-
-    /// Yield control to the scheduler, executing another task. This is guaranteed
-    /// to introduce some amount of randomness to the scheduler. Currently the
-    /// randomness is a result of performing a round of work stealing (which
-    /// may end up stealing from the current scheduler).
-    pub fn yield_now(mut ~self) {
-        self.yield_check_count = reset_yield_check(&mut self.rng);
-        // Tell the scheduler to start stealing on the next iteration
-        self.steal_for_yield = true;
-        self.deschedule_running_task_and_then(|sched, task| {
-            sched.enqueue_blocked_task(task);
-        })
-    }
-
-    pub fn maybe_yield(mut ~self) {
-        // The number of times to do the yield check before yielding, chosen arbitrarily.
-        rtassert!(self.yield_check_count > 0);
-        self.yield_check_count -= 1;
-        if self.yield_check_count == 0 {
-            self.yield_now();
-        } else {
-            Local::put(self);
-        }
-    }
-
-
-    // * Utility Functions
-
-    pub fn sched_id(&self) -> uint { to_uint(self) }
-
-    pub fn run_cleanup_job(&mut self) {
-        let cleanup_job = self.cleanup_job.take_unwrap();
-        cleanup_job.run(self);
-    }
-
-    pub fn make_handle(&mut self) -> SchedHandle {
-        let remote = self.event_loop.remote_callback(~SchedRunner as ~Callback);
-
-        return SchedHandle {
-            remote: remote,
-            queue: self.message_producer.clone(),
-            sched_id: self.sched_id()
-        };
-    }
-}
-
-// Supporting types
-
-type SchedulingFn = extern "Rust" fn (~Scheduler, ~Task);
-
-pub enum SchedMessage {
-    Wake,
-    Shutdown,
-    PinnedTask(~Task),
-    TaskFromFriend(~Task),
-    RunOnce(~Task),
-}
-
-pub struct SchedHandle {
-    priv remote: ~RemoteCallback,
-    priv queue: mpsc::Producer<SchedMessage, ()>,
-    sched_id: uint
-}
-
-impl SchedHandle {
-    pub fn send(&mut self, msg: SchedMessage) {
-        self.queue.push(msg);
-        self.remote.fire();
-    }
-}
-
-struct SchedRunner;
-
-impl Callback for SchedRunner {
-    fn call(&mut self) {
-        Scheduler::run_sched_once();
-    }
-}
-
-struct CleanupJob {
-    task: ~Task,
-    f: UnsafeTaskReceiver
-}
-
-impl CleanupJob {
-    pub fn new(task: ~Task, f: UnsafeTaskReceiver) -> CleanupJob {
-        CleanupJob {
-            task: task,
-            f: f
-        }
-    }
-
-    pub fn run(self, sched: &mut Scheduler) {
-        let CleanupJob { task: task, f: f } = self;
-        f.to_fn()(sched, task)
-    }
-}
-
-// XXX: Some hacks to put a || closure in Scheduler without borrowck
-// complaining
-type UnsafeTaskReceiver = raw::Closure;
-trait ClosureConverter {
-    fn from_fn(|&mut Scheduler, ~Task|) -> Self;
-    fn to_fn(self) -> |&mut Scheduler, ~Task|;
-}
-impl ClosureConverter for UnsafeTaskReceiver {
-    fn from_fn(f: |&mut Scheduler, ~Task|) -> UnsafeTaskReceiver {
-        unsafe { transmute(f) }
-    }
-    fn to_fn(self) -> |&mut Scheduler, ~Task| { unsafe { transmute(self) } }
-}
-
-// On unix, we read randomness straight from /dev/urandom, but the
-// default constructor of an XorShiftRng does this via io::fs, which
-// relies on the scheduler existing, so we have to manually load
-// randomness. Windows has its own C API for this, so we don't need to
-// worry there.
-#[cfg(windows)]
-fn new_sched_rng() -> XorShiftRng {
-    XorShiftRng::new()
-}
-#[cfg(unix)]
-fn new_sched_rng() -> XorShiftRng {
-    use libc;
-    use mem;
-    use c_str::ToCStr;
-    use vec::MutableVector;
-    use iter::Iterator;
-    use rand::SeedableRng;
-
-    let fd = "/dev/urandom".with_c_str(|name| {
-        unsafe { libc::open(name, libc::O_RDONLY, 0) }
-    });
-    if fd == -1 {
-        rtabort!("could not open /dev/urandom for reading.")
-    }
-
-    let mut seeds = [0u32, .. 4];
-    let size = mem::size_of_val(&seeds);
-    loop {
-        let nbytes = unsafe {
-            libc::read(fd,
-                       seeds.as_mut_ptr() as *mut libc::c_void,
-                       size as libc::size_t)
-        };
-        rtassert!(nbytes as uint == size);
-
-        if !seeds.iter().all(|x| *x == 0) {
-            break;
-        }
-    }
-
-    unsafe {libc::close(fd);}
-
-    SeedableRng::from_seed(seeds)
-}
-
-#[cfg(test)]
-mod test {
-    use prelude::*;
-
-    use borrow::to_uint;
-    use rt::deque::BufferPool;
-    use rt::basic;
-    use rt::sched::{Scheduler};
-    use rt::task::{Task, Sched};
-    use rt::test::*;
-    use rt::thread::Thread;
-    use rt::util;
-    use task::TaskResult;
-    use unstable::run_in_bare_thread;
-
-    #[test]
-    fn trivial_run_in_newsched_task_test() {
-        let mut task_ran = false;
-        let task_ran_ptr: *mut bool = &mut task_ran;
-        do run_in_newsched_task || {
-            unsafe { *task_ran_ptr = true };
-            rtdebug!("executed from the new scheduler")
-        }
-        assert!(task_ran);
-    }
-
-    #[test]
-    fn multiple_task_test() {
-        let total = 10;
-        let mut task_run_count = 0;
-        let task_run_count_ptr: *mut uint = &mut task_run_count;
-        do run_in_newsched_task || {
-            for _ in range(0u, total) {
-                do spawntask || {
-                    unsafe { *task_run_count_ptr = *task_run_count_ptr + 1};
-                }
-            }
-        }
-        assert!(task_run_count == total);
-    }
-
-    #[test]
-    fn multiple_task_nested_test() {
-        let mut task_run_count = 0;
-        let task_run_count_ptr: *mut uint = &mut task_run_count;
-        do run_in_newsched_task || {
-            do spawntask || {
-                unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 };
-                do spawntask || {
-                    unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 };
-                    do spawntask || {
-                        unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 };
-                    }
-                }
-            }
-        }
-        assert!(task_run_count == 3);
-    }
-
-    // Confirm that a sched_id actually is the uint form of the
-    // pointer to the scheduler struct.
-    #[test]
-    fn simple_sched_id_test() {
-        do run_in_bare_thread {
-            let sched = ~new_test_uv_sched();
-            assert!(to_uint(sched) == sched.sched_id());
-        }
-    }
-
-    // Compare two scheduler ids that are different, this should never
-    // fail but may catch a mistake someday.
-    #[test]
-    fn compare_sched_id_test() {
-        do run_in_bare_thread {
-            let sched_one = ~new_test_uv_sched();
-            let sched_two = ~new_test_uv_sched();
-            assert!(sched_one.sched_id() != sched_two.sched_id());
-        }
-    }
-
-
-    // A very simple test that confirms that a task executing on the
-    // home scheduler notices that it is home.
-    #[test]
-    fn test_home_sched() {
-        do run_in_bare_thread {
-            let mut task_ran = false;
-            let task_ran_ptr: *mut bool = &mut task_ran;
-
-            let mut sched = ~new_test_uv_sched();
-            let sched_handle = sched.make_handle();
-
-            let mut task = ~do Task::new_root_homed(&mut sched.stack_pool, None,
-                                                Sched(sched_handle)) {
-                unsafe { *task_ran_ptr = true };
-                assert!(Task::on_appropriate_sched());
-            };
-
-            let on_exit: proc(TaskResult) = proc(exit_status) {
-                rtassert!(exit_status.is_ok())
-            };
-            task.death.on_exit = Some(on_exit);
-
-            sched.bootstrap(task);
-        }
-    }
-
-    // An advanced test that checks all four possible states that a
-    // (task,sched) can be in regarding homes.
-
-    #[test]
-    fn test_schedule_home_states() {
-        use rt::sleeper_list::SleeperList;
-        use rt::sched::Shutdown;
-        use borrow;
-
-        do run_in_bare_thread {
-
-            let sleepers = SleeperList::new();
-            let mut pool = BufferPool::new();
-            let (normal_worker, normal_stealer) = pool.deque();
-            let (special_worker, special_stealer) = pool.deque();
-            let queues = ~[normal_stealer, special_stealer];
-
-            // Our normal scheduler
-            let mut normal_sched = ~Scheduler::new(
-                basic::event_loop(),
-                normal_worker,
-                queues.clone(),
-                sleepers.clone());
-
-            let normal_handle = normal_sched.make_handle();
-
-            let friend_handle = normal_sched.make_handle();
-
-            // Our special scheduler
-            let mut special_sched = ~Scheduler::new_special(
-                basic::event_loop(),
-                special_worker,
-                queues.clone(),
-                sleepers.clone(),
-                false,
-                Some(friend_handle));
-
-            let special_handle = special_sched.make_handle();
-
-            let t1_handle = special_sched.make_handle();
-            let t4_handle = special_sched.make_handle();
-
-            // Four test tasks:
-            //   1) task is home on special
-            //   2) task not homed, sched doesn't care
-            //   3) task not homed, sched requeues
-            //   4) task not home, send home
-
-            let task1 = ~do Task::new_root_homed(&mut special_sched.stack_pool, None,
-                                                 Sched(t1_handle)) || {
-                rtassert!(Task::on_appropriate_sched());
-            };
-            rtdebug!("task1 id: **{}**", borrow::to_uint(task1));
-
-            let task2 = ~do Task::new_root(&mut normal_sched.stack_pool, None) {
-                rtassert!(Task::on_appropriate_sched());
-            };
-
-            let task3 = ~do Task::new_root(&mut normal_sched.stack_pool, None) {
-                rtassert!(Task::on_appropriate_sched());
-            };
-
-            let task4 = ~do Task::new_root_homed(&mut special_sched.stack_pool, None,
-                                                 Sched(t4_handle)) {
-                rtassert!(Task::on_appropriate_sched());
-            };
-            rtdebug!("task4 id: **{}**", borrow::to_uint(task4));
-
-            // Signal from the special task that we are done.
-            let (port, chan) = Chan::<()>::new();
-
-            let normal_task = ~do Task::new_root(&mut normal_sched.stack_pool, None) {
-                rtdebug!("*about to submit task2*");
-                Scheduler::run_task(task2);
-                rtdebug!("*about to submit task4*");
-                Scheduler::run_task(task4);
-                rtdebug!("*normal_task done*");
-                port.recv();
-                let mut nh = normal_handle;
-                nh.send(Shutdown);
-                let mut sh = special_handle;
-                sh.send(Shutdown);
-            };
-
-            rtdebug!("normal task: {}", borrow::to_uint(normal_task));
-
-            let special_task = ~do Task::new_root(&mut special_sched.stack_pool, None) {
-                rtdebug!("*about to submit task1*");
-                Scheduler::run_task(task1);
-                rtdebug!("*about to submit task3*");
-                Scheduler::run_task(task3);
-                rtdebug!("*done with special_task*");
-                chan.send(());
-            };
-
-            rtdebug!("special task: {}", borrow::to_uint(special_task));
-
-            let normal_sched = normal_sched;
-            let normal_thread = do Thread::start {
-                normal_sched.bootstrap(normal_task);
-                rtdebug!("finished with normal_thread");
-            };
-
-            let special_sched = special_sched;
-            let special_thread = do Thread::start {
-                special_sched.bootstrap(special_task);
-                rtdebug!("finished with special_sched");
-            };
-
-            normal_thread.join();
-            special_thread.join();
-        }
-    }
-
-    #[test]
-    fn test_stress_schedule_task_states() {
-        if util::limit_thread_creation_due_to_osx_and_valgrind() { return; }
-        let n = stress_factor() * 120;
-        for _ in range(0, n as int) {
-            test_schedule_home_states();
-        }
-    }
-
-    #[test]
-    fn test_io_callback() {
-        use io::timer;
-
-        // This is a regression test that when there are no schedulable tasks
-        // in the work queue, but we are performing I/O, that once we do put
-        // something in the work queue again the scheduler picks it up and doesn't
-        // exit before emptying the work queue
-        do run_in_uv_task {
-            do spawntask {
-                timer::sleep(10);
-            }
-        }
-    }
-
-    #[test]
-    fn handle() {
-        do run_in_bare_thread {
-            let (port, chan) = Chan::new();
-
-            let thread_one = do Thread::start {
-                let chan = chan;
-                do run_in_newsched_task_core {
-                    chan.send(());
-                }
-            };
-
-            let thread_two = do Thread::start {
-                let port = port;
-                do run_in_newsched_task_core {
-                    port.recv();
-                }
-            };
-
-            thread_two.join();
-            thread_one.join();
-        }
-    }
-
-    // A regression test that the final message is always handled.
-    // Used to deadlock because Shutdown was never recvd.
-    #[test]
-    fn no_missed_messages() {
-        use rt::sleeper_list::SleeperList;
-        use rt::stack::StackPool;
-        use rt::sched::{Shutdown, TaskFromFriend};
-
-        do run_in_bare_thread {
-            stress_factor().times(|| {
-                let sleepers = SleeperList::new();
-                let mut pool = BufferPool::new();
-                let (worker, stealer) = pool.deque();
-
-                let mut sched = ~Scheduler::new(
-                    basic::event_loop(),
-                    worker,
-                    ~[stealer],
-                    sleepers.clone());
-
-                let mut handle = sched.make_handle();
-
-                let sched = sched;
-                let thread = do Thread::start {
-                    let mut sched = sched;
-                    let bootstrap_task =
-                        ~Task::new_root(&mut sched.stack_pool,
-                                        None,
-                                        proc()());
-                    sched.bootstrap(bootstrap_task);
-                };
-
-                let mut stack_pool = StackPool::new();
-                let task = ~Task::new_root(&mut stack_pool, None, proc()());
-                handle.send(TaskFromFriend(task));
-
-                handle.send(Shutdown);
-                drop(handle);
-
-                thread.join();
-            })
-        }
-    }
-
-    #[test]
-    fn multithreading() {
-        use num::Times;
-        use vec::OwnedVector;
-        use container::Container;
-
-        do run_in_mt_newsched_task {
-            let mut ports = ~[];
-            10.times(|| {
-                let (port, chan) = Chan::new();
-                do spawntask_later {
-                    chan.send(());
-                }
-                ports.push(port);
-            });
-
-            while !ports.is_empty() {
-                ports.pop().recv();
-            }
-        }
-    }
-
-     #[test]
-    fn thread_ring() {
-        do run_in_mt_newsched_task {
-            let (end_port, end_chan) = Chan::new();
-
-            let n_tasks = 10;
-            let token = 2000;
-
-            let (mut p, ch1) = Chan::new();
-            ch1.send((token, end_chan));
-            let mut i = 2;
-            while i <= n_tasks {
-                let (next_p, ch) = Chan::new();
-                let imm_i = i;
-                let imm_p = p;
-                do spawntask_random {
-                    roundtrip(imm_i, n_tasks, &imm_p, &ch);
-                };
-                p = next_p;
-                i += 1;
-            }
-            let p = p;
-            do spawntask_random {
-                roundtrip(1, n_tasks, &p, &ch1);
-            }
-
-            end_port.recv();
-        }
-
-        fn roundtrip(id: int, n_tasks: int,
-                     p: &Port<(int, Chan<()>)>,
-                     ch: &Chan<(int, Chan<()>)>) {
-            while (true) {
-                match p.recv() {
-                    (1, end_chan) => {
-                        debug!("{}\n", id);
-                        end_chan.send(());
-                        return;
-                    }
-                    (token, end_chan) => {
-                        debug!("thread: {}   got token: {}", id, token);
-                        ch.send((token - 1, end_chan));
-                        if token <= n_tasks {
-                            return;
-                        }
-                    }
-                }
-            }
-        }
-    }
-
-    #[test]
-    fn start_closure_dtor() {
-        use ops::Drop;
-
-        // Regression test that the `start` task entrypoint can
-        // contain dtors that use task resources
-        do run_in_newsched_task {
-            struct S { field: () }
-
-            impl Drop for S {
-                fn drop(&mut self) {
-                    let _foo = @0;
-                }
-            }
-
-            let s = S { field: () };
-
-            do spawntask {
-                let _ss = &s;
-            }
-        }
-    }
-
-    // FIXME: #9407: xfail-test
-    #[ignore]
-    #[test]
-    fn dont_starve_1() {
-        stress_factor().times(|| {
-            do run_in_mt_newsched_task {
-                let (port, chan) = Chan::new();
-
-                // This task should not be able to starve the sender;
-                // The sender should get stolen to another thread.
-                do spawntask {
-                    while port.try_recv().is_none() { }
-                }
-
-                chan.send(());
-            }
-        })
-    }
-
-    #[test]
-    fn dont_starve_2() {
-        stress_factor().times(|| {
-            do run_in_newsched_task {
-                let (port, chan) = Chan::new();
-                let (_port2, chan2) = Chan::new();
-
-                // This task should not be able to starve the other task.
-                // The sends should eventually yield.
-                do spawntask {
-                    while port.try_recv().is_none() {
-                        chan2.send(());
-                    }
-                }
-
-                chan.send(());
-            }
-        })
-    }
-
-    // Regression test for a logic bug that would cause single-threaded schedulers
-    // to sleep forever after yielding and stealing another task.
-    #[test]
-    fn single_threaded_yield() {
-        use task::{spawn, spawn_sched, SingleThreaded, deschedule};
-        use num::Times;
-
-        do spawn_sched(SingleThreaded) {
-            5.times(|| { deschedule(); })
-        }
-        do spawn { }
-        do spawn { }
-    }
-}
diff --git a/src/libstd/rt/sleeper_list.rs b/src/libstd/rt/sleeper_list.rs
deleted file mode 100644
index 39c7431837f..00000000000
--- a/src/libstd/rt/sleeper_list.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Maintains a shared list of sleeping schedulers. Schedulers
-//! use this to wake each other up.
-
-use rt::sched::SchedHandle;
-use rt::mpmc_bounded_queue::Queue;
-use option::*;
-use clone::Clone;
-
-pub struct SleeperList {
-    priv q: Queue<SchedHandle>,
-}
-
-impl SleeperList {
-    pub fn new() -> SleeperList {
-        SleeperList{q: Queue::with_capacity(8*1024)}
-    }
-
-    pub fn push(&mut self, value: SchedHandle) {
-        assert!(self.q.push(value))
-    }
-
-    pub fn pop(&mut self) -> Option<SchedHandle> {
-        self.q.pop()
-    }
-
-    pub fn casual_pop(&mut self) -> Option<SchedHandle> {
-        self.q.pop()
-    }
-}
-
-impl Clone for SleeperList {
-    fn clone(&self) -> SleeperList {
-        SleeperList {
-            q: self.q.clone()
-        }
-    }
-}
diff --git a/src/libstd/rt/stack.rs b/src/libstd/rt/stack.rs
deleted file mode 100644
index 44b60e955d2..00000000000
--- a/src/libstd/rt/stack.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use container::Container;
-use ptr::RawPtr;
-use vec;
-use ops::Drop;
-use libc::{c_uint, uintptr_t};
-
-pub struct StackSegment {
-    priv buf: ~[u8],
-    priv valgrind_id: c_uint
-}
-
-impl StackSegment {
-    pub fn new(size: uint) -> StackSegment {
-        unsafe {
-            // Crate a block of uninitialized values
-            let mut stack = vec::with_capacity(size);
-            stack.set_len(size);
-
-            let mut stk = StackSegment {
-                buf: stack,
-                valgrind_id: 0
-            };
-
-            // XXX: Using the FFI to call a C macro. Slow
-            stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end());
-            return stk;
-        }
-    }
-
-    /// Point to the low end of the allocated stack
-    pub fn start(&self) -> *uint {
-        self.buf.as_ptr() as *uint
-    }
-
-    /// Point one word beyond the high end of the allocated stack
-    pub fn end(&self) -> *uint {
-        unsafe {
-            self.buf.as_ptr().offset(self.buf.len() as int) as *uint
-        }
-    }
-}
-
-impl Drop for StackSegment {
-    fn drop(&mut self) {
-        unsafe {
-            // XXX: Using the FFI to call a C macro. Slow
-            rust_valgrind_stack_deregister(self.valgrind_id);
-        }
-    }
-}
-
-pub struct StackPool(());
-
-impl StackPool {
-    pub fn new() -> StackPool { StackPool(()) }
-
-    fn take_segment(&self, min_size: uint) -> StackSegment {
-        StackSegment::new(min_size)
-    }
-
-    fn give_segment(&self, _stack: StackSegment) {
-    }
-}
-
-extern {
-    fn rust_valgrind_stack_register(start: *uintptr_t, end: *uintptr_t) -> c_uint;
-    fn rust_valgrind_stack_deregister(id: c_uint);
-}
diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs
index 30e05e9091f..e6ab159a769 100644
--- a/src/libstd/rt/task.rs
+++ b/src/libstd/rt/task.rs
@@ -13,29 +13,41 @@
 //! local storage, and logging. Even a 'freestanding' Rust would likely want
 //! to implement this.
 
-use super::local_heap::LocalHeap;
-
-use prelude::*;
-
+use any::AnyOwnExt;
 use borrow;
+use cast;
 use cleanup;
 use io::Writer;
-use libc::{c_char, size_t};
+use iter::{Iterator, Take};
 use local_data;
+use ops::Drop;
 use option::{Option, Some, None};
+use prelude::drop;
+use result::{Result, Ok, Err};
+use rt::Runtime;
 use rt::borrowck::BorrowRecord;
 use rt::borrowck;
-use rt::context::Context;
-use rt::env;
-use rt::kill::Death;
 use rt::local::Local;
+use rt::local_heap::LocalHeap;
 use rt::logging::StdErrLogger;
-use rt::sched::{Scheduler, SchedHandle};
-use rt::stack::{StackSegment, StackPool};
+use rt::rtio::LocalIo;
 use rt::unwind::Unwinder;
 use send_str::SendStr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT};
+use task::{TaskResult, TaskOpts};
 use unstable::finally::Finally;
-use unstable::mutex::Mutex;
+use unstable::mutex::{Mutex, MUTEX_INIT};
+
+#[cfg(stage0)]
+pub use rt::unwind::begin_unwind;
+
+// These two statics are used as bookeeping to keep track of the rust runtime's
+// count of threads. In 1:1 contexts, this is used to know when to return from
+// the main function, and in M:N contexts this is used to know when to shut down
+// the pool of schedulers.
+static mut TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT;
+static mut TASK_LOCK: Mutex = MUTEX_INIT;
 
 // The Task struct represents all state associated with a rust
 // task. There are at this point two primary "subtypes" of task,
@@ -45,201 +57,90 @@ use unstable::mutex::Mutex;
 
 pub struct Task {
     heap: LocalHeap,
-    priv gc: GarbageCollector,
+    gc: GarbageCollector,
     storage: LocalStorage,
-    logger: Option<StdErrLogger>,
     unwinder: Unwinder,
     death: Death,
     destroyed: bool,
     name: Option<SendStr>,
-    coroutine: Option<Coroutine>,
-    sched: Option<~Scheduler>,
-    task_type: TaskType,
     // Dynamic borrowck debugging info
     borrow_list: Option<~[BorrowRecord]>,
+
+    logger: Option<StdErrLogger>,
     stdout_handle: Option<~Writer>,
 
-    // See the comments in the scheduler about why this is necessary
-    nasty_deschedule_lock: Mutex,
+    priv imp: Option<~Runtime>,
 }
 
-pub enum TaskType {
-    GreenTask(Option<SchedHome>),
-    SchedTask
-}
+pub struct GarbageCollector;
+pub struct LocalStorage(Option<local_data::Map>);
 
-/// A coroutine is nothing more than a (register context, stack) pair.
-pub struct Coroutine {
-    /// The segment of stack on which the task is currently running or
-    /// if the task is blocked, on which the task will resume
-    /// execution.
-    ///
-    /// Servo needs this to be public in order to tell SpiderMonkey
-    /// about the stack bounds.
-    current_stack_segment: StackSegment,
-    /// Always valid if the task is alive and not running.
-    saved_context: Context
+/// A handle to a blocked task. Usually this means having the ~Task pointer by
+/// ownership, but if the task is killable, a killer can steal it at any time.
+pub enum BlockedTask {
+    Owned(~Task),
+    Shared(UnsafeArc<AtomicUint>),
 }
 
-/// Some tasks have a dedicated home scheduler that they must run on.
-pub enum SchedHome {
-    AnySched,
-    Sched(SchedHandle)
+/// Per-task state related to task death, killing, failure, etc.
+pub struct Death {
+    // Action to be done with the exit code. If set, also makes the task wait
+    // until all its watched children exit before collecting the status.
+    on_exit: Option<proc(TaskResult)>,
 }
 
-pub struct GarbageCollector;
-pub struct LocalStorage(Option<local_data::Map>);
+pub struct BlockedTaskIterator {
+    priv inner: UnsafeArc<AtomicUint>,
+}
 
 impl Task {
-
-    // A helper to build a new task using the dynamically found
-    // scheduler and task. Only works in GreenTask context.
-    pub fn build_homed_child(stack_size: Option<uint>,
-                             f: proc(),
-                             home: SchedHome)
-                             -> ~Task {
-        let mut running_task = Local::borrow(None::<Task>);
-        let mut sched = running_task.get().sched.take_unwrap();
-        let new_task = ~running_task.get()
-                                    .new_child_homed(&mut sched.stack_pool,
-                                                     stack_size,
-                                                     home,
-                                                     f);
-        running_task.get().sched = Some(sched);
-        new_task
-    }
-
-    pub fn build_child(stack_size: Option<uint>, f: proc()) -> ~Task {
-        Task::build_homed_child(stack_size, f, AnySched)
-    }
-
-    pub fn build_homed_root(stack_size: Option<uint>,
-                            f: proc(),
-                            home: SchedHome)
-                            -> ~Task {
-        let mut running_task = Local::borrow(None::<Task>);
-        let mut sched = running_task.get().sched.take_unwrap();
-        let new_task = ~Task::new_root_homed(&mut sched.stack_pool,
-                                             stack_size,
-                                             home,
-                                             f);
-        running_task.get().sched = Some(sched);
-        new_task
-    }
-
-    pub fn build_root(stack_size: Option<uint>, f: proc()) -> ~Task {
-        Task::build_homed_root(stack_size, f, AnySched)
-    }
-
-    pub fn new_sched_task() -> Task {
+    pub fn new() -> Task {
         Task {
             heap: LocalHeap::new(),
             gc: GarbageCollector,
             storage: LocalStorage(None),
-            logger: None,
-            unwinder: Unwinder { unwinding: false, cause: None },
+            unwinder: Unwinder::new(),
             death: Death::new(),
             destroyed: false,
-            coroutine: Some(Coroutine::empty()),
             name: None,
-            sched: None,
-            task_type: SchedTask,
             borrow_list: None,
-            stdout_handle: None,
-            nasty_deschedule_lock: unsafe { Mutex::new() },
-        }
-    }
-
-    pub fn new_root(stack_pool: &mut StackPool,
-                    stack_size: Option<uint>,
-                    start: proc()) -> Task {
-        Task::new_root_homed(stack_pool, stack_size, AnySched, start)
-    }
-
-    pub fn new_child(&mut self,
-                     stack_pool: &mut StackPool,
-                     stack_size: Option<uint>,
-                     start: proc()) -> Task {
-        self.new_child_homed(stack_pool, stack_size, AnySched, start)
-    }
-
-    pub fn new_root_homed(stack_pool: &mut StackPool,
-                          stack_size: Option<uint>,
-                          home: SchedHome,
-                          start: proc()) -> Task {
-        Task {
-            heap: LocalHeap::new(),
-            gc: GarbageCollector,
-            storage: LocalStorage(None),
             logger: None,
-            unwinder: Unwinder { unwinding: false, cause: None },
-            death: Death::new(),
-            destroyed: false,
-            name: None,
-            coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
-            sched: None,
-            task_type: GreenTask(Some(home)),
-            borrow_list: None,
             stdout_handle: None,
-            nasty_deschedule_lock: unsafe { Mutex::new() },
+            imp: None,
         }
     }
 
-    pub fn new_child_homed(&mut self,
-                           stack_pool: &mut StackPool,
-                           stack_size: Option<uint>,
-                           home: SchedHome,
-                           start: proc()) -> Task {
-        Task {
-            heap: LocalHeap::new(),
-            gc: GarbageCollector,
-            storage: LocalStorage(None),
-            logger: None,
-            unwinder: Unwinder { unwinding: false, cause: None },
-            death: Death::new(),
-            destroyed: false,
-            name: None,
-            coroutine: Some(Coroutine::new(stack_pool, stack_size, start)),
-            sched: None,
-            task_type: GreenTask(Some(home)),
-            borrow_list: None,
-            stdout_handle: None,
-            nasty_deschedule_lock: unsafe { Mutex::new() },
-        }
-    }
-
-    pub fn give_home(&mut self, new_home: SchedHome) {
-        match self.task_type {
-            GreenTask(ref mut home) => {
-                *home = Some(new_home);
-            }
-            SchedTask => {
-                rtabort!("type error: used SchedTask as GreenTask");
-            }
-        }
-    }
-
-    pub fn take_unwrap_home(&mut self) -> SchedHome {
-        match self.task_type {
-            GreenTask(ref mut home) => {
-                let out = home.take_unwrap();
-                return out;
-            }
-            SchedTask => {
-                rtabort!("type error: used SchedTask as GreenTask");
-            }
-        }
-    }
-
-    pub fn run(&mut self, f: ||) {
-        rtdebug!("run called on task: {}", borrow::to_uint(self));
+    /// Executes the given closure as if it's running inside this task. The task
+    /// is consumed upon entry, and the destroyed task is returned from this
+    /// function in order for the caller to free. This function is guaranteed to
+    /// not unwind because the closure specified is run inside of a `rust_try`
+    /// block. (this is the only try/catch block in the world).
+    ///
+    /// This function is *not* meant to be abused as a "try/catch" block. This
+    /// is meant to be used at the absolute boundaries of a task's lifetime, and
+    /// only for that purpose.
+    pub fn run(~self, f: ||) -> ~Task {
+        // Need to put ourselves into TLS, but also need access to the unwinder.
+        // Unsafely get a handle to the task so we can continue to use it after
+        // putting it in tls (so we can invoke the unwinder).
+        let handle: *mut Task = unsafe {
+            *cast::transmute::<&~Task, &*mut Task>(&self)
+        };
+        Local::put(self);
+        unsafe { TASK_COUNT.fetch_add(1, SeqCst); }
 
         // The only try/catch block in the world. Attempt to run the task's
         // client-specified code and catch any failures.
-        self.unwinder.try(|| {
+        let try_block = || {
 
             // Run the task main function, then do some cleanup.
             f.finally(|| {
+                fn flush(w: Option<~Writer>) {
+                    match w {
+                        Some(mut w) => { w.flush(); }
+                        None => {}
+                    }
+                }
 
                 // First, destroy task-local storage. This may run user dtors.
                 //
@@ -260,7 +161,10 @@ impl Task {
                 // TLS, or possibly some destructors for those objects being
                 // annihilated invoke TLS. Sadly these two operations seemed to
                 // be intertwined, and miraculously work for now...
-                self.storage.take();
+                let mut task = Local::borrow(None::<Task>);
+                let storage = task.get().storage.take();
+                drop(task);
+                drop(storage);
 
                 // Destroy remaining boxes. Also may run user dtors.
                 unsafe { cleanup::annihilate(); }
@@ -268,77 +172,141 @@ impl Task {
                 // Finally flush and destroy any output handles which the task
                 // owns. There are no boxes here, and no user destructors should
                 // run after this any more.
-                match self.stdout_handle.take() {
-                    Some(handle) => {
-                        let mut handle = handle;
-                        handle.flush();
-                    }
-                    None => {}
-                }
-                self.logger.take();
+                let mut task = Local::borrow(None::<Task>);
+                let stdout = task.get().stdout_handle.take();
+                let logger = task.get().logger.take();
+                drop(task);
+
+                flush(stdout);
+                drop(logger);
             })
-        });
+        };
+
+        unsafe { (*handle).unwinder.try(try_block); }
 
         // Cleanup the dynamic borrowck debugging info
         borrowck::clear_task_borrow_list();
 
-        self.death.collect_failure(self.unwinder.result());
-        self.destroyed = true;
+        // Here we must unsafely borrow the task in order to not remove it from
+        // TLS. When collecting failure, we may attempt to send on a channel (or
+        // just run arbitrary code), so we must be sure to still have a local
+        // task in TLS.
+        unsafe {
+            let me: *mut Task = Local::unsafe_borrow();
+            (*me).death.collect_failure((*me).unwinder.result());
+
+            // see comments on these statics for why they're used
+            if TASK_COUNT.fetch_sub(1, SeqCst) == 1 {
+                TASK_LOCK.lock();
+                TASK_LOCK.signal();
+                TASK_LOCK.unlock();
+            }
+        }
+        let mut me: ~Task = Local::take();
+        me.destroyed = true;
+        return me;
     }
 
-    // New utility functions for homes.
+    /// Inserts a runtime object into this task, transferring ownership to the
+    /// task. It is illegal to replace a previous runtime object in this task
+    /// with this argument.
+    pub fn put_runtime(&mut self, ops: ~Runtime) {
+        assert!(self.imp.is_none());
+        self.imp = Some(ops);
+    }
 
-    pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
-        match self.task_type {
-            GreenTask(Some(AnySched)) => { false }
-            GreenTask(Some(Sched(SchedHandle { sched_id: ref id, .. }))) => {
-                *id == sched.sched_id()
-            }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                // Awe yea
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
+    /// Attempts to extract the runtime as a specific type. If the runtime does
+    /// not have the provided type, then the runtime is not removed. If the
+    /// runtime does have the specified type, then it is removed and returned
+    /// (transfer of ownership).
+    ///
+    /// It is recommended to only use this method when *absolutely necessary*.
+    /// This function may not be available in the future.
+    pub fn maybe_take_runtime<T: 'static>(&mut self) -> Option<~T> {
+        // This is a terrible, terrible function. The general idea here is to
+        // take the runtime, cast it to ~Any, check if it has the right type,
+        // and then re-cast it back if necessary. The method of doing this is
+        // pretty sketchy and involves shuffling vtables of trait objects
+        // around, but it gets the job done.
+        //
+        // XXX: This function is a serious code smell and should be avoided at
+        //      all costs. I have yet to think of a method to avoid this
+        //      function, and I would be saddened if more usage of the function
+        //      crops up.
+        unsafe {
+            let imp = self.imp.take_unwrap();
+            let &(vtable, _): &(uint, uint) = cast::transmute(&imp);
+            match imp.wrap().move::<T>() {
+                Ok(t) => Some(t),
+                Err(t) => {
+                    let (_, obj): (uint, uint) = cast::transmute(t);
+                    let obj: ~Runtime = cast::transmute((vtable, obj));
+                    self.put_runtime(obj);
+                    None
+                }
             }
         }
     }
 
-    pub fn homed(&self) -> bool {
-        match self.task_type {
-            GreenTask(Some(AnySched)) => { false }
-            GreenTask(Some(Sched(SchedHandle { .. }))) => { true }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
-            }
-        }
+    /// Spawns a sibling to this task. The newly spawned task is configured with
+    /// the `opts` structure and will run `f` as the body of its code.
+    pub fn spawn_sibling(mut ~self, opts: TaskOpts, f: proc()) {
+        let ops = self.imp.take_unwrap();
+        ops.spawn_sibling(self, opts, f)
     }
 
-    // Grab both the scheduler and the task from TLS and check if the
-    // task is executing on an appropriate scheduler.
-    pub fn on_appropriate_sched() -> bool {
-        let mut task = Local::borrow(None::<Task>);
-        let sched_id = task.get().sched.get_ref().sched_id();
-        let sched_run_anything = task.get().sched.get_ref().run_anything;
-        match task.get().task_type {
-            GreenTask(Some(AnySched)) => {
-                rtdebug!("anysched task in sched check ****");
-                sched_run_anything
-            }
-            GreenTask(Some(Sched(SchedHandle { sched_id: ref id, ..}))) => {
-                rtdebug!("homed task in sched check ****");
-                *id == sched_id
-            }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
-            }
+    /// Deschedules the current task, invoking `f` `amt` times. It is not
+    /// recommended to use this function directly, but rather communication
+    /// primitives in `std::comm` should be used.
+    pub fn deschedule(mut ~self, amt: uint,
+                      f: |BlockedTask| -> Result<(), BlockedTask>) {
+        let ops = self.imp.take_unwrap();
+        ops.deschedule(amt, self, f)
+    }
+
+    /// Wakes up a previously blocked task, optionally specifying whether the
+    /// current task can accept a change in scheduling. This function can only
+    /// be called on tasks that were previously blocked in `deschedule`.
+    pub fn reawaken(mut ~self, can_resched: bool) {
+        let ops = self.imp.take_unwrap();
+        ops.reawaken(self, can_resched);
+    }
+
+    /// Yields control of this task to another task. This function will
+    /// eventually return, but possibly not immediately. This is used as an
+    /// opportunity to allow other tasks a chance to run.
+    pub fn yield_now(mut ~self) {
+        let ops = self.imp.take_unwrap();
+        ops.yield_now(self);
+    }
+
+    /// Similar to `yield_now`, except that this function may immediately return
+    /// without yielding (depending on what the runtime decides to do).
+    pub fn maybe_yield(mut ~self) {
+        let ops = self.imp.take_unwrap();
+        ops.maybe_yield(self);
+    }
+
+    /// Acquires a handle to the I/O factory that this task contains, normally
+    /// stored in the task's runtime. This factory may not always be available,
+    /// which is why the return type is `Option`
+    pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> {
+        self.imp.get_mut_ref().local_io()
+    }
+
+    /// The main function of all rust executables will by default use this
+    /// function. This function will *block* the OS thread (hence the `unsafe`)
+    /// waiting for all known tasks to complete. Once this function has
+    /// returned, it is guaranteed that no more user-defined code is still
+    /// running.
+    pub unsafe fn wait_for_other_tasks(&mut self) {
+        TASK_COUNT.fetch_sub(1, SeqCst); // don't count ourselves
+        TASK_LOCK.lock();
+        while TASK_COUNT.load(SeqCst) > 0 {
+            TASK_LOCK.wait();
         }
+        TASK_LOCK.unlock();
+        TASK_COUNT.fetch_add(1, SeqCst); // add ourselves back in
     }
 }
 
@@ -346,348 +314,192 @@ impl Drop for Task {
     fn drop(&mut self) {
         rtdebug!("called drop for a task: {}", borrow::to_uint(self));
         rtassert!(self.destroyed);
-
-        unsafe { self.nasty_deschedule_lock.destroy(); }
     }
 }
 
-// Coroutines represent nothing more than a context and a stack
-// segment.
-
-impl Coroutine {
-
-    pub fn new(stack_pool: &mut StackPool,
-               stack_size: Option<uint>,
-               start: proc())
-               -> Coroutine {
-        let stack_size = match stack_size {
-            Some(size) => size,
-            None => env::min_stack()
-        };
-        let start = Coroutine::build_start_wrapper(start);
-        let mut stack = stack_pool.take_segment(stack_size);
-        let initial_context = Context::new(start, &mut stack);
-        Coroutine {
-            current_stack_segment: stack,
-            saved_context: initial_context
-        }
+impl Iterator<BlockedTask> for BlockedTaskIterator {
+    fn next(&mut self) -> Option<BlockedTask> {
+        Some(Shared(self.inner.clone()))
     }
+}
 
-    pub fn empty() -> Coroutine {
-        Coroutine {
-            current_stack_segment: StackSegment::new(0),
-            saved_context: Context::empty()
+impl BlockedTask {
+    /// Returns Some if the task was successfully woken; None if already killed.
+    pub fn wake(self) -> Option<~Task> {
+        match self {
+            Owned(task) => Some(task),
+            Shared(arc) => unsafe {
+                match (*arc.get()).swap(0, SeqCst) {
+                    0 => None,
+                    n => Some(cast::transmute(n)),
+                }
+            }
         }
     }
 
-    fn build_start_wrapper(start: proc()) -> proc() {
-        let wrapper: proc() = proc() {
-            // First code after swap to this new context. Run our
-            // cleanup job.
-            unsafe {
+    // This assertion has two flavours because the wake involves an atomic op.
+    // In the faster version, destructors will fail dramatically instead.
+    #[cfg(not(test))] pub fn trash(self) { }
+    #[cfg(test)]      pub fn trash(self) { assert!(self.wake().is_none()); }
 
-                // Again - might work while safe, or it might not.
-                {
-                    let mut sched = Local::borrow(None::<Scheduler>);
-                    sched.get().run_cleanup_job();
-                }
+    /// Create a blocked task, unless the task was already killed.
+    pub fn block(task: ~Task) -> BlockedTask {
+        Owned(task)
+    }
 
-                // To call the run method on a task we need a direct
-                // reference to it. The task is in TLS, so we can
-                // simply unsafe_borrow it to get this reference. We
-                // need to still have the task in TLS though, so we
-                // need to unsafe_borrow.
-                let task: *mut Task = Local::unsafe_borrow();
-
-                let mut start_cell = Some(start);
-                (*task).run(|| {
-                    // N.B. Removing `start` from the start wrapper
-                    // closure by emptying a cell is critical for
-                    // correctness. The ~Task pointer, and in turn the
-                    // closure used to initialize the first call
-                    // frame, is destroyed in the scheduler context,
-                    // not task context. So any captured closures must
-                    // not contain user-definable dtors that expect to
-                    // be in task context. By moving `start` out of
-                    // the closure, all the user code goes our of
-                    // scope while the task is still running.
-                    let start = start_cell.take_unwrap();
-                    start();
-                });
+    /// Converts one blocked task handle to a list of many handles to the same.
+    pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTaskIterator>
+    {
+        let arc = match self {
+            Owned(task) => {
+                let flag = unsafe { AtomicUint::new(cast::transmute(task)) };
+                UnsafeArc::new(flag)
             }
-
-            // We remove the sched from the Task in TLS right now.
-            let sched: ~Scheduler = Local::take();
-            // ... allowing us to give it away when performing a
-            // scheduling operation.
-            sched.terminate_current_task()
+            Shared(arc) => arc.clone(),
         };
-        return wrapper;
+        BlockedTaskIterator{ inner: arc }.take(num_handles)
     }
 
-    /// Destroy coroutine and try to reuse stack segment.
-    pub fn recycle(self, stack_pool: &mut StackPool) {
+    /// Convert to an unsafe uint value. Useful for storing in a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_to_uint(self) -> uint {
         match self {
-            Coroutine { current_stack_segment, .. } => {
-                stack_pool.give_segment(current_stack_segment);
+            Owned(task) => {
+                let blocked_task_ptr: uint = cast::transmute(task);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr
+            }
+            Shared(arc) => {
+                let blocked_task_ptr: uint = cast::transmute(~arc);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr | 0x1
             }
         }
     }
 
-}
-
-/// This function is invoked from rust's current __morestack function. Segmented
-/// stacks are currently not enabled as segmented stacks, but rather one giant
-/// stack segment. This means that whenever we run out of stack, we want to
-/// truly consider it to be stack overflow rather than allocating a new stack.
-#[no_mangle]      // - this is called from C code
-#[no_split_stack] // - it would be sad for this function to trigger __morestack
-#[doc(hidden)]    // - Function must be `pub` to get exported, but it's
-                  //   irrelevant for documentation purposes.
-#[cfg(not(test))] // in testing, use the original libstd's version
-pub extern "C" fn rust_stack_exhausted() {
-    use rt::context;
-    use rt::in_green_task_context;
-    use rt::task::Task;
-    use rt::local::Local;
-    use unstable::intrinsics;
-
-    unsafe {
-        // We're calling this function because the stack just ran out. We need
-        // to call some other rust functions, but if we invoke the functions
-        // right now it'll just trigger this handler being called again. In
-        // order to alleviate this, we move the stack limit to be inside of the
-        // red zone that was allocated for exactly this reason.
-        let limit = context::get_sp_limit();
-        context::record_sp_limit(limit - context::RED_ZONE / 2);
-
-        // This probably isn't the best course of action. Ideally one would want
-        // to unwind the stack here instead of just aborting the entire process.
-        // This is a tricky problem, however. There's a few things which need to
-        // be considered:
-        //
-        //  1. We're here because of a stack overflow, yet unwinding will run
-        //     destructors and hence arbitrary code. What if that code overflows
-        //     the stack? One possibility is to use the above allocation of an
-        //     extra 10k to hope that we don't hit the limit, and if we do then
-        //     abort the whole program. Not the best, but kind of hard to deal
-        //     with unless we want to switch stacks.
-        //
-        //  2. LLVM will optimize functions based on whether they can unwind or
-        //     not. It will flag functions with 'nounwind' if it believes that
-        //     the function cannot trigger unwinding, but if we do unwind on
-        //     stack overflow then it means that we could unwind in any function
-        //     anywhere. We would have to make sure that LLVM only places the
-        //     nounwind flag on functions which don't call any other functions.
-        //
-        //  3. The function that overflowed may have owned arguments. These
-        //     arguments need to have their destructors run, but we haven't even
-        //     begun executing the function yet, so unwinding will not run the
-        //     any landing pads for these functions. If this is ignored, then
-        //     the arguments will just be leaked.
-        //
-        // Exactly what to do here is a very delicate topic, and is possibly
-        // still up in the air for what exactly to do. Some relevant issues:
-        //
-        //  #3555 - out-of-stack failure leaks arguments
-        //  #3695 - should there be a stack limit?
-        //  #9855 - possible strategies which could be taken
-        //  #9854 - unwinding on windows through __morestack has never worked
-        //  #2361 - possible implementation of not using landing pads
-
-        if in_green_task_context() {
-            let mut task = Local::borrow(None::<Task>);
-            let n = task.get()
-                        .name
-                        .as_ref()
-                        .map(|n| n.as_slice())
-                        .unwrap_or("<unnamed>");
-
-            // See the message below for why this is not emitted to the
-            // task's logger. This has the additional conundrum of the
-            // logger may not be initialized just yet, meaning that an FFI
-            // call would happen to initialized it (calling out to libuv),
-            // and the FFI call needs 2MB of stack when we just ran out.
-            rterrln!("task '{}' has overflowed its stack", n);
+    /// Convert from an unsafe uint value. Useful for retrieving a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
+        if blocked_task_ptr & 0x1 == 0 {
+            Owned(cast::transmute(blocked_task_ptr))
         } else {
-            rterrln!("stack overflow in non-task context");
+            let ptr: ~UnsafeArc<AtomicUint> =
+                cast::transmute(blocked_task_ptr & !1);
+            Shared(*ptr)
         }
-
-        intrinsics::abort();
     }
 }
 
-/// This is the entry point of unwinding for things like lang items and such.
-/// The arguments are normally generated by the compiler, and need to
-/// have static lifetimes.
-pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
-    use c_str::CString;
-    use cast::transmute;
+impl Death {
+    pub fn new() -> Death {
+        Death { on_exit: None, }
+    }
 
-    #[inline]
-    fn static_char_ptr(p: *c_char) -> &'static str {
-        let s = unsafe { CString::new(p, false) };
-        match s.as_str() {
-            Some(s) => unsafe { transmute::<&str, &'static str>(s) },
-            None => rtabort!("message wasn't utf8?")
+    /// Collect failure exit codes from children and propagate them to a parent.
+    pub fn collect_failure(&mut self, result: TaskResult) {
+        match self.on_exit.take() {
+            Some(f) => f(result),
+            None => {}
         }
     }
-
-    let msg = static_char_ptr(msg);
-    let file = static_char_ptr(file);
-
-    begin_unwind(msg, file, line as uint)
 }
 
-/// This is the entry point of unwinding for fail!() and assert!().
-pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
-    use any::AnyRefExt;
-    use rt::in_green_task_context;
-    use rt::local::Local;
-    use rt::task::Task;
-    use str::Str;
-    use unstable::intrinsics;
-
-    unsafe {
-        let task: *mut Task;
-        // Note that this should be the only allocation performed in this block.
-        // Currently this means that fail!() on OOM will invoke this code path,
-        // but then again we're not really ready for failing on OOM anyway. If
-        // we do start doing this, then we should propagate this allocation to
-        // be performed in the parent of this task instead of the task that's
-        // failing.
-        let msg = ~msg as ~Any;
-
-        {
-            //let msg: &Any = msg;
-            let msg_s = match msg.as_ref::<&'static str>() {
-                Some(s) => *s,
-                None => match msg.as_ref::<~str>() {
-                    Some(s) => s.as_slice(),
-                    None => "~Any",
-                }
-            };
-
-            if !in_green_task_context() {
-                rterrln!("failed in non-task context at '{}', {}:{}",
-                         msg_s, file, line);
-                intrinsics::abort();
-            }
-
-            task = Local::unsafe_borrow();
-            let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
-
-            // XXX: this should no get forcibly printed to the console, this should
-            //      either be sent to the parent task (ideally), or get printed to
-            //      the task's logger. Right now the logger is actually a uvio
-            //      instance, which uses unkillable blocks internally for various
-            //      reasons. This will cause serious trouble if the task is failing
-            //      due to mismanagment of its own kill flag, so calling our own
-            //      logger in its current state is a bit of a problem.
-
-            rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line);
-
-            if (*task).unwinder.unwinding {
-                rtabort!("unwinding again");
-            }
-        }
-
-        (*task).unwinder.begin_unwind(msg);
+impl Drop for Death {
+    fn drop(&mut self) {
+        // make this type noncopyable
     }
 }
 
 #[cfg(test)]
 mod test {
     use super::*;
-    use rt::test::*;
     use prelude::*;
+    use task;
 
     #[test]
     fn local_heap() {
-        do run_in_newsched_task() {
-            let a = @5;
-            let b = a;
-            assert!(*a == 5);
-            assert!(*b == 5);
-        }
+        let a = @5;
+        let b = a;
+        assert!(*a == 5);
+        assert!(*b == 5);
     }
 
     #[test]
     fn tls() {
         use local_data;
-        do run_in_newsched_task() {
-            local_data_key!(key: @~str)
-            local_data::set(key, @~"data");
-            assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
-            local_data_key!(key2: @~str)
-            local_data::set(key2, @~"data");
-            assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
-        }
+        local_data_key!(key: @~str)
+        local_data::set(key, @~"data");
+        assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data");
+        local_data_key!(key2: @~str)
+        local_data::set(key2, @~"data");
+        assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data");
     }
 
     #[test]
     fn unwind() {
-        do run_in_newsched_task() {
-            let result = spawntask_try(proc()());
-            rtdebug!("trying first assert");
-            assert!(result.is_ok());
-            let result = spawntask_try(proc() fail!());
-            rtdebug!("trying second assert");
-            assert!(result.is_err());
-        }
+        let result = task::try(proc()());
+        rtdebug!("trying first assert");
+        assert!(result.is_ok());
+        let result = task::try::<()>(proc() fail!());
+        rtdebug!("trying second assert");
+        assert!(result.is_err());
     }
 
     #[test]
     fn rng() {
-        do run_in_uv_task() {
-            use rand::{rng, Rng};
-            let mut r = rng();
-            let _ = r.next_u32();
-        }
+        use rand::{rng, Rng};
+        let mut r = rng();
+        let _ = r.next_u32();
     }
 
     #[test]
     fn logging() {
-        do run_in_uv_task() {
-            info!("here i am. logging in a newsched task");
-        }
+        info!("here i am. logging in a newsched task");
     }
 
     #[test]
     fn comm_stream() {
-        do run_in_newsched_task() {
-            let (port, chan) = Chan::new();
-            chan.send(10);
-            assert!(port.recv() == 10);
-        }
+        let (port, chan) = Chan::new();
+        chan.send(10);
+        assert!(port.recv() == 10);
     }
 
     #[test]
     fn comm_shared_chan() {
-        do run_in_newsched_task() {
-            let (port, chan) = SharedChan::new();
-            chan.send(10);
-            assert!(port.recv() == 10);
-        }
+        let (port, chan) = SharedChan::new();
+        chan.send(10);
+        assert!(port.recv() == 10);
     }
 
     #[test]
     fn heap_cycles() {
         use option::{Option, Some, None};
 
-        do run_in_newsched_task {
-            struct List {
-                next: Option<@mut List>,
-            }
+        struct List {
+            next: Option<@mut List>,
+        }
 
-            let a = @mut List { next: None };
-            let b = @mut List { next: Some(a) };
+        let a = @mut List { next: None };
+        let b = @mut List { next: Some(a) };
 
-            a.next = Some(b);
-        }
+        a.next = Some(b);
     }
 
     #[test]
     #[should_fail]
-    fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) }
+    fn test_begin_unwind() {
+        use rt::unwind::begin_unwind;
+        begin_unwind("cause", file!(), line!())
+    }
+
+    // Task blocking tests
+
+    #[test]
+    fn block_and_wake() {
+        let task = ~Task::new();
+        let mut task = BlockedTask::block(task).wake().unwrap();
+        task.destroyed = true;
+    }
 }
diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs
deleted file mode 100644
index 2b48b396c99..00000000000
--- a/src/libstd/rt/test.rs
+++ /dev/null
@@ -1,440 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
-
-use clone::Clone;
-use container::Container;
-use iter::{Iterator, range};
-use option::{Some, None};
-use os;
-use path::GenericPath;
-use path::Path;
-use rand::Rng;
-use rand;
-use result::{Result, Ok, Err};
-use rt::basic;
-use rt::deque::BufferPool;
-use comm::Chan;
-use rt::new_event_loop;
-use rt::sched::Scheduler;
-use rt::sleeper_list::SleeperList;
-use rt::task::Task;
-use rt::thread::Thread;
-use task::TaskResult;
-use unstable::{run_in_bare_thread};
-use vec;
-use vec::{OwnedVector, MutableVector, ImmutableVector};
-
-pub fn new_test_uv_sched() -> Scheduler {
-
-    let mut pool = BufferPool::new();
-    let (worker, stealer) = pool.deque();
-
-    let mut sched = Scheduler::new(new_event_loop(),
-                                   worker,
-                                   ~[stealer],
-                                   SleeperList::new());
-
-    // Don't wait for the Shutdown message
-    sched.no_sleep = true;
-    return sched;
-
-}
-
-pub fn new_test_sched() -> Scheduler {
-    let mut pool = BufferPool::new();
-    let (worker, stealer) = pool.deque();
-
-    let mut sched = Scheduler::new(basic::event_loop(),
-                                   worker,
-                                   ~[stealer],
-                                   SleeperList::new());
-
-    // Don't wait for the Shutdown message
-    sched.no_sleep = true;
-    return sched;
-}
-
-pub fn run_in_uv_task(f: proc()) {
-    do run_in_bare_thread {
-        run_in_uv_task_core(f);
-    }
-}
-
-pub fn run_in_newsched_task(f: proc()) {
-    do run_in_bare_thread {
-        run_in_newsched_task_core(f);
-    }
-}
-
-pub fn run_in_uv_task_core(f: proc()) {
-
-    use rt::sched::Shutdown;
-
-    let mut sched = ~new_test_uv_sched();
-    let exit_handle = sched.make_handle();
-
-    let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
-        let mut exit_handle = exit_handle;
-        exit_handle.send(Shutdown);
-        rtassert!(exit_status.is_ok());
-    };
-    let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
-    task.death.on_exit = Some(on_exit);
-
-    sched.bootstrap(task);
-}
-
-pub fn run_in_newsched_task_core(f: proc()) {
-    use rt::sched::Shutdown;
-
-    let mut sched = ~new_test_sched();
-    let exit_handle = sched.make_handle();
-
-    let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
-        let mut exit_handle = exit_handle;
-        exit_handle.send(Shutdown);
-        rtassert!(exit_status.is_ok());
-    };
-    let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
-    task.death.on_exit = Some(on_exit);
-
-    sched.bootstrap(task);
-}
-
-#[cfg(target_os="macos")]
-#[allow(non_camel_case_types)]
-mod darwin_fd_limit {
-    /*!
-     * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the
-     * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low
-     * for our multithreaded scheduler testing, depending on the number of cores available.
-     *
-     * This fixes issue #7772.
-     */
-
-    use libc;
-    type rlim_t = libc::uint64_t;
-    struct rlimit {
-        rlim_cur: rlim_t,
-        rlim_max: rlim_t
-    }
-    #[nolink]
-    extern {
-        // name probably doesn't need to be mut, but the C function doesn't specify const
-        fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint,
-                  oldp: *mut libc::c_void, oldlenp: *mut libc::size_t,
-                  newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int;
-        fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int;
-        fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int;
-    }
-    static CTL_KERN: libc::c_int = 1;
-    static KERN_MAXFILESPERPROC: libc::c_int = 29;
-    static RLIMIT_NOFILE: libc::c_int = 8;
-
-    pub unsafe fn raise_fd_limit() {
-        // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc
-        // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value.
-        use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null};
-        use mem::size_of_val;
-        use os::last_os_error;
-
-        // Fetch the kern.maxfilesperproc value
-        let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC];
-        let mut maxfiles: libc::c_int = 0;
-        let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t;
-        if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2,
-                  to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void,
-                  to_mut_unsafe_ptr(&mut size),
-                  mut_null(), 0) != 0 {
-            let err = last_os_error();
-            error!("raise_fd_limit: error calling sysctl: {}", err);
-            return;
-        }
-
-        // Fetch the current resource limits
-        let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0};
-        if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 {
-            let err = last_os_error();
-            error!("raise_fd_limit: error calling getrlimit: {}", err);
-            return;
-        }
-
-        // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit
-        rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max);
-
-        // Set our newly-increased resource limit
-        if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 {
-            let err = last_os_error();
-            error!("raise_fd_limit: error calling setrlimit: {}", err);
-            return;
-        }
-    }
-}
-
-#[cfg(not(target_os="macos"))]
-mod darwin_fd_limit {
-    pub unsafe fn raise_fd_limit() {}
-}
-
-#[doc(hidden)]
-pub fn prepare_for_lots_of_tests() {
-    // Bump the fd limit on OS X. See darwin_fd_limit for an explanation.
-    unsafe { darwin_fd_limit::raise_fd_limit() }
-}
-
-/// Create more than one scheduler and run a function in a task
-/// in one of the schedulers. The schedulers will stay alive
-/// until the function `f` returns.
-pub fn run_in_mt_newsched_task(f: proc()) {
-    use os;
-    use from_str::FromStr;
-    use rt::sched::Shutdown;
-    use rt::util;
-
-    // see comment in other function (raising fd limits)
-    prepare_for_lots_of_tests();
-
-    do run_in_bare_thread {
-        let nthreads = match os::getenv("RUST_RT_TEST_THREADS") {
-            Some(nstr) => FromStr::from_str(nstr).unwrap(),
-            None => {
-                if util::limit_thread_creation_due_to_osx_and_valgrind() {
-                    1
-                } else {
-                    // Using more threads than cores in test code
-                    // to force the OS to preempt them frequently.
-                    // Assuming that this help stress test concurrent types.
-                    util::num_cpus() * 2
-                }
-            }
-        };
-
-        let sleepers = SleeperList::new();
-
-        let mut handles = ~[];
-        let mut scheds = ~[];
-
-        let mut pool = BufferPool::<~Task>::new();
-        let workers = range(0, nthreads).map(|_| pool.deque());
-        let (workers, stealers) = vec::unzip(workers);
-
-        for worker in workers.move_iter() {
-            let loop_ = new_event_loop();
-            let mut sched = ~Scheduler::new(loop_,
-                                            worker,
-                                            stealers.clone(),
-                                            sleepers.clone());
-            let handle = sched.make_handle();
-
-            handles.push(handle);
-            scheds.push(sched);
-        }
-
-        let handles = handles;  // Work around not being able to capture mut
-        let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) {
-            // Tell schedulers to exit
-            let mut handles = handles;
-            for handle in handles.mut_iter() {
-                handle.send(Shutdown);
-            }
-
-            rtassert!(exit_status.is_ok());
-        };
-        let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool,
-                                            None,
-                                            f);
-        main_task.death.on_exit = Some(on_exit);
-
-        let mut threads = ~[];
-
-        let main_thread = {
-            let sched = scheds.pop();
-            let main_task = main_task;
-            do Thread::start {
-                sched.bootstrap(main_task);
-            }
-        };
-        threads.push(main_thread);
-
-        while !scheds.is_empty() {
-            let mut sched = scheds.pop();
-            let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || {
-                rtdebug!("bootstrapping non-primary scheduler");
-            };
-            let sched = sched;
-            let thread = do Thread::start {
-                sched.bootstrap(bootstrap_task);
-            };
-
-            threads.push(thread);
-        }
-
-        // Wait for schedulers
-        for thread in threads.move_iter() {
-            thread.join();
-        }
-    }
-
-}
-
-/// Test tasks will abort on failure instead of unwinding
-pub fn spawntask(f: proc()) {
-    Scheduler::run_task(Task::build_child(None, f));
-}
-
-/// Create a new task and run it right now. Aborts on failure
-pub fn spawntask_later(f: proc()) {
-    Scheduler::run_task_later(Task::build_child(None, f));
-}
-
-pub fn spawntask_random(f: proc()) {
-    use rand::{Rand, rng};
-
-    let mut rng = rng();
-    let run_now: bool = Rand::rand(&mut rng);
-
-    if run_now {
-        spawntask(f)
-    } else {
-        spawntask_later(f)
-    }
-}
-
-pub fn spawntask_try(f: proc()) -> Result<(),()> {
-
-    let (port, chan) = Chan::new();
-    let on_exit: proc(TaskResult) = proc(exit_status) {
-        chan.send(exit_status)
-    };
-
-    let mut new_task = Task::build_root(None, f);
-    new_task.death.on_exit = Some(on_exit);
-
-    Scheduler::run_task(new_task);
-
-    let exit_status = port.recv();
-    if exit_status.is_ok() { Ok(()) } else { Err(()) }
-
-}
-
-/// Spawn a new task in a new scheduler and return a thread handle.
-pub fn spawntask_thread(f: proc()) -> Thread<()> {
-    let thread = do Thread::start {
-        run_in_newsched_task_core(f);
-    };
-
-    return thread;
-}
-
-/// Get a ~Task for testing purposes other than actually scheduling it.
-pub fn with_test_task(blk: proc(~Task) -> ~Task) {
-    do run_in_bare_thread {
-        let mut sched = ~new_test_sched();
-        let task = blk(~Task::new_root(&mut sched.stack_pool,
-                                       None,
-                                       proc() {}));
-        cleanup_task(task);
-    }
-}
-
-/// Use to cleanup tasks created for testing but not "run".
-pub fn cleanup_task(mut task: ~Task) {
-    task.destroyed = true;
-}
-
-/// Get a port number, starting at 9600, for use in tests
-pub fn next_test_port() -> u16 {
-    use unstable::mutex::{Mutex, MUTEX_INIT};
-    static mut lock: Mutex = MUTEX_INIT;
-    static mut next_offset: u16 = 0;
-    unsafe {
-        let base = base_port();
-        lock.lock();
-        let ret = base + next_offset;
-        next_offset += 1;
-        lock.unlock();
-        return ret;
-    }
-}
-
-/// Get a temporary path which could be the location of a unix socket
-pub fn next_test_unix() -> Path {
-    if cfg!(unix) {
-        os::tmpdir().join(rand::task_rng().gen_ascii_str(20))
-    } else {
-        Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20))
-    }
-}
-
-/// Get a unique IPv4 localhost:port pair starting at 9600
-pub fn next_test_ip4() -> SocketAddr {
-    SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() }
-}
-
-/// Get a unique IPv6 localhost:port pair starting at 9600
-pub fn next_test_ip6() -> SocketAddr {
-    SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() }
-}
-
-/*
-XXX: Welcome to MegaHack City.
-
-The bots run multiple builds at the same time, and these builds
-all want to use ports. This function figures out which workspace
-it is running in and assigns a port range based on it.
-*/
-fn base_port() -> u16 {
-    use os;
-    use str::StrSlice;
-    use vec::ImmutableVector;
-
-    let base = 9600u16;
-    let range = 1000u16;
-
-    let bases = [
-        ("32-opt", base + range * 1),
-        ("32-noopt", base + range * 2),
-        ("64-opt", base + range * 3),
-        ("64-noopt", base + range * 4),
-        ("64-opt-vg", base + range * 5),
-        ("all-opt", base + range * 6),
-        ("snap3", base + range * 7),
-        ("dist", base + range * 8)
-    ];
-
-    // FIXME (#9639): This needs to handle non-utf8 paths
-    let path = os::getcwd();
-    let path_s = path.as_str().unwrap();
-
-    let mut final_base = base;
-
-    for &(dir, base) in bases.iter() {
-        if path_s.contains(dir) {
-            final_base = base;
-            break;
-        }
-    }
-
-    return final_base;
-}
-
-/// Get a constant that represents the number of times to repeat
-/// stress tests. Default 1.
-pub fn stress_factor() -> uint {
-    use os::getenv;
-    use from_str::from_str;
-
-    match getenv("RUST_RT_STRESS") {
-        Some(val) => from_str::<uint>(val).unwrap(),
-        None => 1
-    }
-}
diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs
index 6128f310a2e..f4f4aaa2765 100644
--- a/src/libstd/rt/thread.rs
+++ b/src/libstd/rt/thread.rs
@@ -33,7 +33,7 @@ pub struct Thread<T> {
     priv packet: ~Option<T>,
 }
 
-static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024;
+static DEFAULT_STACK_SIZE: uint = 1024 * 1024;
 
 // This is the starting point of rust os threads. The first thing we do
 // is make sure that we don't trigger __morestack (also why this has a
@@ -41,9 +41,9 @@ static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024;
 // and invoke it.
 #[no_split_stack]
 extern fn thread_start(main: *libc::c_void) -> imp::rust_thread_return {
-    use rt::context;
+    use unstable::stack;
     unsafe {
-        context::record_stack_bounds(0, uint::max_value);
+        stack::record_stack_bounds(0, uint::max_value);
         let f: ~proc() = cast::transmute(main);
         (*f)();
         cast::transmute(0 as imp::rust_thread_return)
@@ -69,6 +69,12 @@ impl Thread<()> {
     /// called, when the `Thread` falls out of scope its destructor will block
     /// waiting for the OS thread.
     pub fn start<T: Send>(main: proc() -> T) -> Thread<T> {
+        Thread::start_stack(DEFAULT_STACK_SIZE, main)
+    }
+
+    /// Performs the same functionality as `start`, but specifies an explicit
+    /// stack size for the new thread.
+    pub fn start_stack<T: Send>(stack: uint, main: proc() -> T) -> Thread<T> {
 
         // We need the address of the packet to fill in to be stable so when
         // `main` fills it in it's still valid, so allocate an extra ~ box to do
@@ -78,7 +84,7 @@ impl Thread<()> {
             *cast::transmute::<&~Option<T>, **mut Option<T>>(&packet)
         };
         let main: proc() = proc() unsafe { *packet2 = Some(main()); };
-        let native = unsafe { imp::create(~main) };
+        let native = unsafe { imp::create(stack, ~main) };
 
         Thread {
             native: native,
@@ -94,8 +100,14 @@ impl Thread<()> {
     /// systems. Note that platforms may not keep the main program alive even if
     /// there are detached thread still running around.
     pub fn spawn(main: proc()) {
+        Thread::spawn_stack(DEFAULT_STACK_SIZE, main)
+    }
+
+    /// Performs the same functionality as `spawn`, but explicitly specifies a
+    /// stack size for the new thread.
+    pub fn spawn_stack(stack: uint, main: proc()) {
         unsafe {
-            let handle = imp::create(~main);
+            let handle = imp::create(stack, ~main);
             imp::detach(handle);
         }
     }
@@ -132,8 +144,6 @@ impl<T: Send> Drop for Thread<T> {
 
 #[cfg(windows)]
 mod imp {
-    use super::DEFAULT_STACK_SIZE;
-
     use cast;
     use libc;
     use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL,
@@ -143,9 +153,9 @@ mod imp {
     pub type rust_thread = HANDLE;
     pub type rust_thread_return = DWORD;
 
-    pub unsafe fn create(p: ~proc()) -> rust_thread {
+    pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread {
         let arg: *mut libc::c_void = cast::transmute(p);
-        CreateThread(ptr::mut_null(), DEFAULT_STACK_SIZE, super::thread_start,
+        CreateThread(ptr::mut_null(), stack as libc::size_t, super::thread_start,
                      arg, 0, ptr::mut_null())
     }
 
@@ -183,17 +193,17 @@ mod imp {
     use libc::consts::os::posix01::PTHREAD_CREATE_JOINABLE;
     use libc;
     use ptr;
-    use super::DEFAULT_STACK_SIZE;
     use unstable::intrinsics;
 
     pub type rust_thread = libc::pthread_t;
     pub type rust_thread_return = *libc::c_void;
 
-    pub unsafe fn create(p: ~proc()) -> rust_thread {
+    pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread {
         let mut native: libc::pthread_t = intrinsics::uninit();
         let mut attr: libc::pthread_attr_t = intrinsics::uninit();
         assert_eq!(pthread_attr_init(&mut attr), 0);
-        assert_eq!(pthread_attr_setstacksize(&mut attr, DEFAULT_STACK_SIZE), 0);
+        assert_eq!(pthread_attr_setstacksize(&mut attr,
+                                             stack as libc::size_t), 0);
         assert_eq!(pthread_attr_setdetachstate(&mut attr,
                                                PTHREAD_CREATE_JOINABLE), 0);
 
diff --git a/src/libstd/rt/tube.rs b/src/libstd/rt/tube.rs
deleted file mode 100644
index 5e867bcdfba..00000000000
--- a/src/libstd/rt/tube.rs
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A very simple unsynchronized channel type for sending buffered data from
-//! scheduler context to task context.
-//!
-//! XXX: This would be safer to use if split into two types like Port/Chan
-
-use option::*;
-use clone::Clone;
-use super::rc::RC;
-use rt::sched::Scheduler;
-use rt::kill::BlockedTask;
-use rt::local::Local;
-use vec::OwnedVector;
-use container::Container;
-
-struct TubeState<T> {
-    blocked_task: Option<BlockedTask>,
-    buf: ~[T]
-}
-
-pub struct Tube<T> {
-    priv p: RC<TubeState<T>>
-}
-
-impl<T> Tube<T> {
-    pub fn new() -> Tube<T> {
-        Tube {
-            p: RC::new(TubeState {
-                blocked_task: None,
-                buf: ~[]
-            })
-        }
-    }
-
-    pub fn send(&mut self, val: T) {
-        rtdebug!("tube send");
-        unsafe {
-            let state = self.p.unsafe_borrow_mut();
-            (*state).buf.push(val);
-
-            if (*state).blocked_task.is_some() {
-                // There's a waiting task. Wake it up
-                rtdebug!("waking blocked tube");
-                let task = (*state).blocked_task.take_unwrap();
-                let sched: ~Scheduler = Local::take();
-                sched.resume_blocked_task_immediately(task);
-            }
-        }
-    }
-
-    pub fn recv(&mut self) -> T {
-        unsafe {
-            let state = self.p.unsafe_borrow_mut();
-            if !(*state).buf.is_empty() {
-                return (*state).buf.shift();
-            } else {
-                // Block and wait for the next message
-                rtdebug!("blocking on tube recv");
-                assert!(self.p.refcount() > 1); // There better be somebody to wake us up
-                assert!((*state).blocked_task.is_none());
-                let sched: ~Scheduler = Local::take();
-                sched.deschedule_running_task_and_then(|_, task| {
-                    (*state).blocked_task = Some(task);
-                });
-                rtdebug!("waking after tube recv");
-                let buf = &mut (*state).buf;
-                assert!(!buf.is_empty());
-                return buf.shift();
-            }
-        }
-    }
-}
-
-impl<T> Clone for Tube<T> {
-    fn clone(&self) -> Tube<T> {
-        Tube { p: self.p.clone() }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use rt::test::*;
-    use rt::rtio::EventLoop;
-    use rt::sched::Scheduler;
-    use rt::local::Local;
-    use super::*;
-    use prelude::*;
-
-    #[test]
-    fn simple_test() {
-        do run_in_newsched_task {
-            let mut tube: Tube<int> = Tube::new();
-            let mut tube_clone = Some(tube.clone());
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                let mut tube_clone = tube_clone.take_unwrap();
-                tube_clone.send(1);
-                sched.enqueue_blocked_task(task);
-            });
-
-            assert!(tube.recv() == 1);
-        }
-    }
-
-    #[test]
-    fn blocking_test() {
-        do run_in_newsched_task {
-            let mut tube: Tube<int> = Tube::new();
-            let mut tube_clone = Some(tube.clone());
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                let tube_clone = tube_clone.take_unwrap();
-                do sched.event_loop.callback {
-                    let mut tube_clone = tube_clone;
-                    // The task should be blocked on this now and
-                    // sending will wake it up.
-                    tube_clone.send(1);
-                }
-                sched.enqueue_blocked_task(task);
-            });
-
-            assert!(tube.recv() == 1);
-        }
-    }
-
-    #[test]
-    fn many_blocking_test() {
-        static MAX: int = 100;
-
-        do run_in_newsched_task {
-            let mut tube: Tube<int> = Tube::new();
-            let mut tube_clone = Some(tube.clone());
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                callback_send(tube_clone.take_unwrap(), 0);
-
-                fn callback_send(tube: Tube<int>, i: int) {
-                    if i == 100 {
-                        return
-                    }
-
-                    let mut sched = Local::borrow(None::<Scheduler>);
-                    do sched.get().event_loop.callback {
-                        let mut tube = tube;
-                        // The task should be blocked on this now and
-                        // sending will wake it up.
-                        tube.send(i);
-                        callback_send(tube, i + 1);
-                    }
-                }
-
-                sched.enqueue_blocked_task(task);
-            });
-
-            for i in range(0, MAX) {
-                let j = tube.recv();
-                assert!(j == i);
-            }
-        }
-    }
-}
diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs
index 3f6f54a9c0e..9706dbae4c6 100644
--- a/src/libstd/rt/unwind.rs
+++ b/src/libstd/rt/unwind.rs
@@ -8,11 +8,11 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-
 // Implementation of Rust stack unwinding
 //
-// For background on exception handling and stack unwinding please see "Exception Handling in LLVM"
-// (llvm.org/docs/ExceptionHandling.html) and documents linked from it.
+// For background on exception handling and stack unwinding please see
+// "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and
+// documents linked from it.
 // These are also good reads:
 //     http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/
 //     http://monoinfinito.wordpress.com/series/exception-handling-in-c/
@@ -21,41 +21,55 @@
 // ~~~ A brief summary ~~~
 // Exception handling happens in two phases: a search phase and a cleanup phase.
 //
-// In both phases the unwinder walks stack frames from top to bottom using information from
-// the stack frame unwind sections of the current process's modules ("module" here refers to
-// an OS module, i.e. an executable or a dynamic library).
+// In both phases the unwinder walks stack frames from top to bottom using
+// information from the stack frame unwind sections of the current process's
+// modules ("module" here refers to an OS module, i.e. an executable or a
+// dynamic library).
 //
-// For each stack frame, it invokes the associated "personality routine", whose address is also
-// stored in the unwind info section.
+// For each stack frame, it invokes the associated "personality routine", whose
+// address is also stored in the unwind info section.
 //
-// In the search phase, the job of a personality routine is to examine exception object being
-// thrown, and to decide whether it should be caught at that stack frame.  Once the handler frame
-// has been identified, cleanup phase begins.
+// In the search phase, the job of a personality routine is to examine exception
+// object being thrown, and to decide whether it should be caught at that stack
+// frame.  Once the handler frame has been identified, cleanup phase begins.
 //
-// In the cleanup phase, personality routines invoke cleanup code associated with their
-// stack frames (i.e. destructors).  Once stack has been unwound down to the handler frame level,
-// unwinding stops and the last personality routine transfers control to its' catch block.
+// In the cleanup phase, personality routines invoke cleanup code associated
+// with their stack frames (i.e. destructors).  Once stack has been unwound down
+// to the handler frame level, unwinding stops and the last personality routine
+// transfers control to its' catch block.
 //
 // ~~~ Frame unwind info registration ~~~
-// Each module has its' own frame unwind info section (usually ".eh_frame"), and unwinder needs
-// to know about all of them in order for unwinding to be able to cross module boundaries.
+// Each module has its' own frame unwind info section (usually ".eh_frame"), and
+// unwinder needs to know about all of them in order for unwinding to be able to
+// cross module boundaries.
 //
-// On some platforms, like Linux, this is achieved by dynamically enumerating currently loaded
-// modules via the dl_iterate_phdr() API and finding all .eh_frame sections.
+// On some platforms, like Linux, this is achieved by dynamically enumerating
+// currently loaded modules via the dl_iterate_phdr() API and finding all
+// .eh_frame sections.
 //
-// Others, like Windows, require modules to actively register their unwind info sections by calling
-// __register_frame_info() API at startup.
-// In the latter case it is essential that there is only one copy of the unwinder runtime
-// in the process.  This is usually achieved by linking to the dynamic version of the unwind
-// runtime.
+// Others, like Windows, require modules to actively register their unwind info
+// sections by calling __register_frame_info() API at startup.  In the latter
+// case it is essential that there is only one copy of the unwinder runtime in
+// the process.  This is usually achieved by linking to the dynamic version of
+// the unwind runtime.
 //
 // Currently Rust uses unwind runtime provided by libgcc.
 
-use prelude::*;
-use cast::transmute;
-use task::TaskResult;
+use any::{Any, AnyRefExt};
+use c_str::CString;
+use cast;
+use kinds::Send;
+use libc::{c_char, size_t};
 use libc::{c_void, c_int};
-use self::libunwind::*;
+use option::{Some, None, Option};
+use result::{Err, Ok};
+use rt::local::Local;
+use rt::task::Task;
+use str::Str;
+use task::TaskResult;
+use unstable::intrinsics;
+
+use uw = self::libunwind;
 
 mod libunwind {
     //! Unwind library interface
@@ -110,34 +124,41 @@ mod libunwind {
 }
 
 pub struct Unwinder {
-    unwinding: bool,
-    cause: Option<~Any>
+    priv unwinding: bool,
+    priv cause: Option<~Any>
 }
 
 impl Unwinder {
+    pub fn new() -> Unwinder {
+        Unwinder {
+            unwinding: false,
+            cause: None,
+        }
+    }
+
+    pub fn unwinding(&self) -> bool {
+        self.unwinding
+    }
 
     pub fn try(&mut self, f: ||) {
         use unstable::raw::Closure;
 
         unsafe {
-            let closure: Closure = transmute(f);
-            let code = transmute(closure.code);
-            let env = transmute(closure.env);
-
-            let ep = rust_try(try_fn, code, env);
+            let closure: Closure = cast::transmute(f);
+            let ep = rust_try(try_fn, closure.code as *c_void,
+                              closure.env as *c_void);
             if !ep.is_null() {
                 rtdebug!("Caught {}", (*ep).exception_class);
-                _Unwind_DeleteException(ep);
+                uw::_Unwind_DeleteException(ep);
             }
         }
 
         extern fn try_fn(code: *c_void, env: *c_void) {
             unsafe {
-                let closure: Closure = Closure {
-                    code: transmute(code),
-                    env: transmute(env),
-                };
-                let closure: || = transmute(closure);
+                let closure: || = cast::transmute(Closure {
+                    code: code as *(),
+                    env: env as *(),
+                });
                 closure();
             }
         }
@@ -145,10 +166,11 @@ impl Unwinder {
         extern {
             // Rust's try-catch
             // When f(...) returns normally, the return value is null.
-            // When f(...) throws, the return value is a pointer to the caught exception object.
+            // When f(...) throws, the return value is a pointer to the caught
+            // exception object.
             fn rust_try(f: extern "C" fn(*c_void, *c_void),
                         code: *c_void,
-                        data: *c_void) -> *_Unwind_Exception;
+                        data: *c_void) -> *uw::_Unwind_Exception;
         }
     }
 
@@ -159,21 +181,21 @@ impl Unwinder {
         self.cause = Some(cause);
 
         unsafe {
-            let exception = ~_Unwind_Exception {
+            let exception = ~uw::_Unwind_Exception {
                 exception_class: rust_exception_class(),
                 exception_cleanup: exception_cleanup,
                 private_1: 0,
                 private_2: 0
             };
-            let error = _Unwind_RaiseException(transmute(exception));
+            let error = uw::_Unwind_RaiseException(cast::transmute(exception));
             rtabort!("Could not unwind stack, error = {}", error as int)
         }
 
-        extern "C" fn exception_cleanup(_unwind_code: _Unwind_Reason_Code,
-                                        exception: *_Unwind_Exception) {
+        extern "C" fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code,
+                                        exception: *uw::_Unwind_Exception) {
             rtdebug!("exception_cleanup()");
             unsafe {
-                let _: ~_Unwind_Exception = transmute(exception);
+                let _: ~uw::_Unwind_Exception = cast::transmute(exception);
             }
         }
     }
@@ -189,68 +211,146 @@ impl Unwinder {
 
 // Rust's exception class identifier.  This is used by personality routines to
 // determine whether the exception was thrown by their own runtime.
-fn rust_exception_class() -> _Unwind_Exception_Class {
-    let bytes = bytes!("MOZ\0RUST"); // vendor, language
-    unsafe {
-        let ptr: *_Unwind_Exception_Class = transmute(bytes.as_ptr());
-        *ptr
-    }
+fn rust_exception_class() -> uw::_Unwind_Exception_Class {
+    // M O Z \0  R U S T -- vendor, language
+    0x4d4f5a_00_52555354
 }
 
-
-// We could implement our personality routine in pure Rust, however exception info decoding
-// is tedious.  More importantly, personality routines have to handle various platform
-// quirks, which are not fun to maintain.  For this reason, we attempt to reuse personality
-// routine of the C language: __gcc_personality_v0.
+// We could implement our personality routine in pure Rust, however exception
+// info decoding is tedious.  More importantly, personality routines have to
+// handle various platform quirks, which are not fun to maintain.  For this
+// reason, we attempt to reuse personality routine of the C language:
+// __gcc_personality_v0.
 //
-// Since C does not support exception catching, __gcc_personality_v0 simply always
-// returns _URC_CONTINUE_UNWIND in search phase, and always returns _URC_INSTALL_CONTEXT
-// (i.e. "invoke cleanup code") in cleanup phase.
+// Since C does not support exception catching, __gcc_personality_v0 simply
+// always returns _URC_CONTINUE_UNWIND in search phase, and always returns
+// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase.
 //
-// This is pretty close to Rust's exception handling approach, except that Rust does have
-// a single "catch-all" handler at the bottom of each task's stack.
+// This is pretty close to Rust's exception handling approach, except that Rust
+// does have a single "catch-all" handler at the bottom of each task's stack.
 // So we have two versions:
-// - rust_eh_personality, used by all cleanup landing pads, which never catches, so
-//   the behavior of __gcc_personality_v0 is perfectly adequate there, and
-// - rust_eh_personality_catch, used only by rust_try(), which always catches.  This is
-//   achieved by overriding the return value in search phase to always say "catch!".
+// - rust_eh_personality, used by all cleanup landing pads, which never catches,
+//   so the behavior of __gcc_personality_v0 is perfectly adequate there, and
+// - rust_eh_personality_catch, used only by rust_try(), which always catches.
+//   This is achieved by overriding the return value in search phase to always
+//   say "catch!".
 
 extern "C" {
     fn __gcc_personality_v0(version: c_int,
-                            actions: _Unwind_Action,
-                            exception_class: _Unwind_Exception_Class,
-                            ue_header: *_Unwind_Exception,
-                            context: *_Unwind_Context) -> _Unwind_Reason_Code;
+                            actions: uw::_Unwind_Action,
+                            exception_class: uw::_Unwind_Exception_Class,
+                            ue_header: *uw::_Unwind_Exception,
+                            context: *uw::_Unwind_Context)
+        -> uw::_Unwind_Reason_Code;
 }
 
 #[lang="eh_personality"]
 #[no_mangle] // so we can reference it by name from middle/trans/base.rs
 #[doc(hidden)]
 #[cfg(not(test))]
-pub extern "C" fn rust_eh_personality(version: c_int,
-                                      actions: _Unwind_Action,
-                                      exception_class: _Unwind_Exception_Class,
-                                      ue_header: *_Unwind_Exception,
-                                      context: *_Unwind_Context) -> _Unwind_Reason_Code {
+pub extern "C" fn rust_eh_personality(
+    version: c_int,
+    actions: uw::_Unwind_Action,
+    exception_class: uw::_Unwind_Exception_Class,
+    ue_header: *uw::_Unwind_Exception,
+    context: *uw::_Unwind_Context
+) -> uw::_Unwind_Reason_Code
+{
     unsafe {
-        __gcc_personality_v0(version, actions, exception_class, ue_header, context)
+        __gcc_personality_v0(version, actions, exception_class, ue_header,
+                             context)
     }
 }
 
 #[no_mangle] // referenced from rust_try.ll
 #[doc(hidden)]
 #[cfg(not(test))]
-pub extern "C" fn rust_eh_personality_catch(version: c_int,
-                                            actions: _Unwind_Action,
-                                            exception_class: _Unwind_Exception_Class,
-                                            ue_header: *_Unwind_Exception,
-                                            context: *_Unwind_Context) -> _Unwind_Reason_Code {
-    if (actions as c_int & _UA_SEARCH_PHASE as c_int) != 0 { // search phase
-        _URC_HANDLER_FOUND // catch!
+pub extern "C" fn rust_eh_personality_catch(
+    version: c_int,
+    actions: uw::_Unwind_Action,
+    exception_class: uw::_Unwind_Exception_Class,
+    ue_header: *uw::_Unwind_Exception,
+    context: *uw::_Unwind_Context
+) -> uw::_Unwind_Reason_Code
+{
+    if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase
+        uw::_URC_HANDLER_FOUND // catch!
     }
     else { // cleanup phase
         unsafe {
-             __gcc_personality_v0(version, actions, exception_class, ue_header, context)
+             __gcc_personality_v0(version, actions, exception_class, ue_header,
+                                  context)
         }
     }
 }
+
+/// This is the entry point of unwinding for things like lang items and such.
+/// The arguments are normally generated by the compiler, and need to
+/// have static lifetimes.
+pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
+    #[inline]
+    fn static_char_ptr(p: *c_char) -> &'static str {
+        let s = unsafe { CString::new(p, false) };
+        match s.as_str() {
+            Some(s) => unsafe { cast::transmute::<&str, &'static str>(s) },
+            None => rtabort!("message wasn't utf8?")
+        }
+    }
+
+    let msg = static_char_ptr(msg);
+    let file = static_char_ptr(file);
+
+    begin_unwind(msg, file, line as uint)
+}
+
+/// This is the entry point of unwinding for fail!() and assert!().
+pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
+    unsafe {
+        let task: *mut Task;
+        // Note that this should be the only allocation performed in this block.
+        // Currently this means that fail!() on OOM will invoke this code path,
+        // but then again we're not really ready for failing on OOM anyway. If
+        // we do start doing this, then we should propagate this allocation to
+        // be performed in the parent of this task instead of the task that's
+        // failing.
+        let msg = ~msg as ~Any;
+
+        {
+            let msg_s = match msg.as_ref::<&'static str>() {
+                Some(s) => *s,
+                None => match msg.as_ref::<~str>() {
+                    Some(s) => s.as_slice(),
+                    None => "~Any",
+                }
+            };
+
+            // It is assumed that all reasonable rust code will have a local
+            // task at all times. This means that this `try_unsafe_borrow` will
+            // succeed almost all of the time. There are border cases, however,
+            // when the runtime has *almost* set up the local task, but hasn't
+            // quite gotten there yet. In order to get some better diagnostics,
+            // we print on failure and immediately abort the whole process if
+            // there is no local task available.
+            match Local::try_unsafe_borrow() {
+                Some(t) => {
+                    task = t;
+                    let n = (*task).name.as_ref()
+                                   .map(|n| n.as_slice()).unwrap_or("<unnamed>");
+
+                    rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s,
+                             file, line);
+                }
+                None => {
+                    rterrln!("failed at '{}', {}:{}", msg_s, file, line);
+                    intrinsics::abort();
+                }
+            }
+
+            if (*task).unwinder.unwinding {
+                rtabort!("unwinding again");
+            }
+        }
+
+        (*task).unwinder.begin_unwind(msg);
+    }
+}
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index 93721986f3c..730a38ce886 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -15,7 +15,6 @@ use libc;
 use option::{Some, None, Option};
 use os;
 use str::StrSlice;
-use unstable::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
 use unstable::running_on_valgrind;
 
 // Indicates whether we should perform expensive sanity checks, including rtassert!
@@ -68,11 +67,21 @@ pub fn default_sched_threads() -> uint {
 }
 
 pub fn dumb_println(args: &fmt::Arguments) {
-    use io::native::file::FileDesc;
     use io;
     use libc;
-    let mut out = FileDesc::new(libc::STDERR_FILENO, false);
-    fmt::writeln(&mut out as &mut io::Writer, args);
+
+    struct Stderr;
+    impl io::Writer for Stderr {
+        fn write(&mut self, data: &[u8]) {
+            unsafe {
+                libc::write(libc::STDERR_FILENO,
+                            data.as_ptr() as *libc::c_void,
+                            data.len() as libc::size_t);
+            }
+        }
+    }
+    let mut w = Stderr;
+    fmt::writeln(&mut w as &mut io::Writer, args);
 }
 
 pub fn abort(msg: &str) -> ! {
@@ -133,13 +142,3 @@ memory and partly incapable of presentation to others.",
         unsafe { libc::abort() }
     }
 }
-
-static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
-
-pub fn set_exit_status(code: int) {
-    unsafe { EXIT_STATUS.store(code, SeqCst) }
-}
-
-pub fn get_exit_status() -> int {
-    unsafe { EXIT_STATUS.load(SeqCst) }
-}
diff --git a/src/libstd/run.rs b/src/libstd/run.rs
index d92291bbfbd..69704c855ee 100644
--- a/src/libstd/run.rs
+++ b/src/libstd/run.rs
@@ -338,8 +338,8 @@ mod tests {
     use str;
     use task::spawn;
     use unstable::running_on_valgrind;
-    use io::native::file;
-    use io::{FileNotFound, Reader, Writer, io_error};
+    use io::pipe::PipeStream;
+    use io::{Writer, Reader, io_error, FileNotFound, OtherIoError};
 
     #[test]
     #[cfg(not(target_os="android"))] // FIXME(#10380)
@@ -426,13 +426,13 @@ mod tests {
     }
 
     fn writeclose(fd: c_int, s: &str) {
-        let mut writer = file::FileDesc::new(fd, true);
+        let mut writer = PipeStream::open(fd);
         writer.write(s.as_bytes());
     }
 
     fn readclose(fd: c_int) -> ~str {
         let mut res = ~[];
-        let mut reader = file::FileDesc::new(fd, true);
+        let mut reader = PipeStream::open(fd);
         let mut buf = [0, ..1024];
         loop {
             match reader.read(buf) {
diff --git a/src/libstd/sync/arc.rs b/src/libstd/sync/arc.rs
new file mode 100644
index 00000000000..7b94a3acc2b
--- /dev/null
+++ b/src/libstd/sync/arc.rs
@@ -0,0 +1,152 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Atomically reference counted data
+//!
+//! This module contains the implementation of an atomically reference counted
+//! pointer for the purpose of sharing data between tasks. This is obviously a
+//! very unsafe primitive to use, but it has its use cases when implementing
+//! concurrent data structures and similar tasks.
+//!
+//! Great care must be taken to ensure that data races do not arise through the
+//! usage of `UnsafeArc`, and this often requires some form of external
+//! synchronization. The only guarantee provided to you by this class is that
+//! the underlying data will remain valid (not free'd) so long as the reference
+//! count is greater than zero.
+
+use cast;
+use clone::Clone;
+use kinds::Send;
+use ops::Drop;
+use ptr::RawPtr;
+use sync::atomics::{AtomicUint, SeqCst, Relaxed, Acquire};
+use vec;
+
+/// An atomically reference counted pointer.
+///
+/// Enforces no shared-memory safety.
+#[unsafe_no_drop_flag]
+pub struct UnsafeArc<T> {
+    priv data: *mut ArcData<T>,
+}
+
+struct ArcData<T> {
+    count: AtomicUint,
+    data: T,
+}
+
+unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
+    let data = ~ArcData { count: AtomicUint::new(refcount), data: data };
+    cast::transmute(data)
+}
+
+impl<T: Send> UnsafeArc<T> {
+    /// Creates a new `UnsafeArc` which wraps the given data.
+    pub fn new(data: T) -> UnsafeArc<T> {
+        unsafe { UnsafeArc { data: new_inner(data, 1) } }
+    }
+
+    /// As new(), but returns an extra pre-cloned handle.
+    pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
+        unsafe {
+            let ptr = new_inner(data, 2);
+            (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
+        }
+    }
+
+    /// As new(), but returns a vector of as many pre-cloned handles as
+    /// requested.
+    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
+        unsafe {
+            if num_handles == 0 {
+                ~[] // need to free data here
+            } else {
+                let ptr = new_inner(data, num_handles);
+                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
+            }
+        }
+    }
+
+    /// Gets a pointer to the inner shared data. Note that care must be taken to
+    /// ensure that the outer `UnsafeArc` does not fall out of scope while this
+    /// pointer is in use, otherwise it could possibly contain a use-after-free.
+    #[inline]
+    pub fn get(&self) -> *mut T {
+        unsafe {
+            assert!((*self.data).count.load(Relaxed) > 0);
+            return &mut (*self.data).data as *mut T;
+        }
+    }
+
+    /// Gets an immutable pointer to the inner shared data. This has the same
+    /// caveats as the `get` method.
+    #[inline]
+    pub fn get_immut(&self) -> *T {
+        unsafe {
+            assert!((*self.data).count.load(Relaxed) > 0);
+            return &(*self.data).data as *T;
+        }
+    }
+}
+
+impl<T: Send> Clone for UnsafeArc<T> {
+    fn clone(&self) -> UnsafeArc<T> {
+        unsafe {
+            // This barrier might be unnecessary, but I'm not sure...
+            let old_count = (*self.data).count.fetch_add(1, Acquire);
+            assert!(old_count >= 1);
+            return UnsafeArc { data: self.data };
+        }
+    }
+}
+
+#[unsafe_destructor]
+impl<T> Drop for UnsafeArc<T>{
+    fn drop(&mut self) {
+        unsafe {
+            // Happens when destructing an unwrapper's handle and from
+            // `#[unsafe_no_drop_flag]`
+            if self.data.is_null() {
+                return
+            }
+            // Must be acquire+release, not just release, to make sure this
+            // doesn't get reordered to after the unwrapper pointer load.
+            let old_count = (*self.data).count.fetch_sub(1, SeqCst);
+            assert!(old_count >= 1);
+            if old_count == 1 {
+                let _: ~ArcData<T> = cast::transmute(self.data);
+            }
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use prelude::*;
+    use super::UnsafeArc;
+    use mem::size_of;
+
+    #[test]
+    fn test_size() {
+        assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
+    }
+
+    #[test]
+    fn arclike_newN() {
+        // Tests that the many-refcounts-at-once constructors don't leak.
+        let _ = UnsafeArc::new2(~~"hello");
+        let x = UnsafeArc::newN(~~"hello", 0);
+        assert_eq!(x.len(), 0)
+        let x = UnsafeArc::newN(~~"hello", 1);
+        assert_eq!(x.len(), 1)
+        let x = UnsafeArc::newN(~~"hello", 10);
+        assert_eq!(x.len(), 10)
+    }
+}
diff --git a/src/libstd/unstable/atomics.rs b/src/libstd/sync/atomics.rs
index 9aaccb3ebba..bc9d99c0f37 100644
--- a/src/libstd/unstable/atomics.rs
+++ b/src/libstd/sync/atomics.rs
@@ -11,13 +11,16 @@
 /*!
  * Atomic types
  *
- * Basic atomic types supporting atomic operations. Each method takes an `Ordering` which
- * represents the strength of the memory barrier for that operation. These orderings are the same
- * as C++11 atomic orderings [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync]
+ * Basic atomic types supporting atomic operations. Each method takes an
+ * `Ordering` which represents the strength of the memory barrier for that
+ * operation. These orderings are the same as C++11 atomic orderings
+ * [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync]
  *
  * All atomic types are a single word in size.
  */
 
+#[allow(missing_doc)];
+
 use unstable::intrinsics;
 use cast;
 use option::{Option,Some,None};
diff --git a/src/libstd/rt/deque.rs b/src/libstd/sync/deque.rs
index 770fc9ffa12..4d0efcd6ee1 100644
--- a/src/libstd/rt/deque.rs
+++ b/src/libstd/sync/deque.rs
@@ -50,15 +50,18 @@
 
 use cast;
 use clone::Clone;
-use iter::range;
+use iter::{range, Iterator};
 use kinds::Send;
 use libc;
 use mem;
 use ops::Drop;
 use option::{Option, Some, None};
 use ptr;
-use unstable::atomics::{AtomicInt, AtomicPtr, SeqCst};
-use unstable::sync::{UnsafeArc, Exclusive};
+use ptr::RawPtr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
+use unstable::sync::Exclusive;
+use vec::{OwnedVector, ImmutableVector};
 
 // Once the queue is less than 1/K full, then it will be downsized. Note that
 // the deque requires that this number be less than 2.
@@ -399,8 +402,8 @@ mod tests {
     use rt::thread::Thread;
     use rand;
     use rand::Rng;
-    use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
-                            AtomicUint, INIT_ATOMIC_UINT};
+    use sync::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst,
+                        AtomicUint, INIT_ATOMIC_UINT};
     use vec;
 
     #[test]
diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs
new file mode 100644
index 00000000000..3213c538152
--- /dev/null
+++ b/src/libstd/sync/mod.rs
@@ -0,0 +1,23 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Useful synchronization primitives
+//!
+//! This module contains useful safe and unsafe synchronization primitives.
+//! Most of the primitives in this module do not provide any sort of locking
+//! and/or blocking at all, but rather provide the necessary tools to build
+//! other types of concurrent primitives.
+
+pub mod arc;
+pub mod atomics;
+pub mod deque;
+pub mod mpmc_bounded_queue;
+pub mod mpsc_queue;
+pub mod spsc_queue;
diff --git a/src/libstd/rt/mpmc_bounded_queue.rs b/src/libstd/sync/mpmc_bounded_queue.rs
index 25a3ba8ab48..fe51de4e42d 100644
--- a/src/libstd/rt/mpmc_bounded_queue.rs
+++ b/src/libstd/sync/mpmc_bounded_queue.rs
@@ -25,15 +25,17 @@
  * policies, either expressed or implied, of Dmitry Vyukov.
  */
 
+#[allow(missing_doc, dead_code)];
+
 // http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
 
-use unstable::sync::UnsafeArc;
-use unstable::atomics::{AtomicUint,Relaxed,Release,Acquire};
-use option::*;
-use vec;
 use clone::Clone;
 use kinds::Send;
 use num::{Exponential,Algebraic,Round};
+use option::{Option, Some, None};
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicUint,Relaxed,Release,Acquire};
+use vec;
 
 struct Node<T> {
     sequence: AtomicUint,
@@ -161,8 +163,8 @@ impl<T: Send> Clone for Queue<T> {
 mod tests {
     use prelude::*;
     use option::*;
-    use task;
     use super::Queue;
+    use native;
 
     #[test]
     fn test() {
@@ -170,14 +172,17 @@ mod tests {
         let nmsgs = 1000u;
         let mut q = Queue::with_capacity(nthreads*nmsgs);
         assert_eq!(None, q.pop());
+        let (port, chan) = SharedChan::new();
 
         for _ in range(0, nthreads) {
             let q = q.clone();
-            do task::spawn_sched(task::SingleThreaded) {
+            let chan = chan.clone();
+            do native::task::spawn {
                 let mut q = q;
                 for i in range(0, nmsgs) {
                     assert!(q.push(i));
                 }
+                chan.send(());
             }
         }
 
@@ -186,7 +191,7 @@ mod tests {
             let (completion_port, completion_chan) = Chan::new();
             completion_ports.push(completion_port);
             let q = q.clone();
-            do task::spawn_sched(task::SingleThreaded) {
+            do native::task::spawn {
                 let mut q = q;
                 let mut i = 0u;
                 loop {
@@ -205,5 +210,8 @@ mod tests {
         for completion_port in completion_ports.mut_iter() {
             assert_eq!(nmsgs, completion_port.recv());
         }
+        for _ in range(0, nthreads) {
+            port.recv();
+        }
     }
 }
diff --git a/src/libstd/rt/mpsc_queue.rs b/src/libstd/sync/mpsc_queue.rs
index d575028af70..a249d6ed2e8 100644
--- a/src/libstd/rt/mpsc_queue.rs
+++ b/src/libstd/sync/mpsc_queue.rs
@@ -26,6 +26,14 @@
  */
 
 //! A mostly lock-free multi-producer, single consumer queue.
+//!
+//! This module contains an implementation of a concurrent MPSC queue. This
+//! queue can be used to share data between tasks, and is also used as the
+//! building block of channels in rust.
+//!
+//! Note that the current implementation of this queue has a caveat of the `pop`
+//! method, and see the method for more information about it. Due to this
+//! caveat, this queue may not be appropriate for all use-cases.
 
 // http://www.1024cores.net/home/lock-free-algorithms
 //                         /queues/non-intrusive-mpsc-node-based-queue
@@ -35,9 +43,11 @@ use clone::Clone;
 use kinds::Send;
 use ops::Drop;
 use option::{Option, None, Some};
-use unstable::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
-use unstable::sync::UnsafeArc;
+use ptr::RawPtr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed};
 
+/// A result of the `pop` function.
 pub enum PopResult<T> {
     /// Some data has been popped
     Data(T),
@@ -61,10 +71,14 @@ struct State<T, P> {
     packet: P,
 }
 
+/// The consumer half of this concurrent queue. This half is used to receive
+/// data from the producers.
 pub struct Consumer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
 
+/// The production half of the concurrent queue. This handle may be cloned in
+/// order to make handles for new producers.
 pub struct Producer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
@@ -75,6 +89,11 @@ impl<T: Send, P: Send> Clone for Producer<T, P> {
     }
 }
 
+/// Creates a new MPSC queue. The given argument `p` is a user-defined "packet"
+/// of information which will be shared by the consumer and the producer which
+/// can be re-acquired via the `packet` function. This is helpful when extra
+/// state is shared between the producer and consumer, but note that there is no
+/// synchronization performed on this data.
 pub fn queue<T: Send, P: Send>(p: P) -> (Consumer<T, P>, Producer<T, P>) {
     unsafe {
         let (a, b) = UnsafeArc::new2(State::new(p));
@@ -92,7 +111,7 @@ impl<T> Node<T> {
 }
 
 impl<T: Send, P: Send> State<T, P> {
-    pub unsafe fn new(p: P) -> State<T, P> {
+    unsafe fn new(p: P) -> State<T, P> {
         let stub = Node::new(None);
         State {
             head: AtomicPtr::new(stub),
@@ -122,10 +141,6 @@ impl<T: Send, P: Send> State<T, P> {
 
         if self.head.load(Acquire) == tail {Empty} else {Inconsistent}
     }
-
-    unsafe fn is_empty(&mut self) -> bool {
-        return (*self.tail).next.load(Acquire).is_null();
-    }
 }
 
 #[unsafe_destructor]
@@ -143,27 +158,42 @@ impl<T: Send, P: Send> Drop for State<T, P> {
 }
 
 impl<T: Send, P: Send> Producer<T, P> {
+    /// Pushes a new value onto this queue.
     pub fn push(&mut self, value: T) {
         unsafe { (*self.state.get()).push(value) }
     }
-    pub fn is_empty(&self) -> bool {
-        unsafe{ (*self.state.get()).is_empty() }
-    }
+    /// Gets an unsafe pointer to the user-defined packet shared by the
+    /// producers and the consumer. Note that care must be taken to ensure that
+    /// the lifetime of the queue outlives the usage of the returned pointer.
     pub unsafe fn packet(&self) -> *mut P {
         &mut (*self.state.get()).packet as *mut P
     }
 }
 
 impl<T: Send, P: Send> Consumer<T, P> {
+    /// Pops some data from this queue.
+    ///
+    /// Note that the current implementation means that this function cannot
+    /// return `Option<T>`. It is possible for this queue to be in an
+    /// inconsistent state where many pushes have suceeded and completely
+    /// finished, but pops cannot return `Some(t)`. This inconsistent state
+    /// happens when a pusher is pre-empted at an inopportune moment.
+    ///
+    /// This inconsistent state means that this queue does indeed have data, but
+    /// it does not currently have access to it at this time.
     pub fn pop(&mut self) -> PopResult<T> {
         unsafe { (*self.state.get()).pop() }
     }
+    /// Attempts to pop data from this queue, but doesn't attempt too hard. This
+    /// will canonicalize inconsistent states to a `None` value.
     pub fn casual_pop(&mut self) -> Option<T> {
         match self.pop() {
             Data(t) => Some(t),
             Empty | Inconsistent => None,
         }
     }
+    /// Gets an unsafe pointer to the underlying user-defined packet. See
+    /// `Producer.packet` for more information.
     pub unsafe fn packet(&self) -> *mut P {
         &mut (*self.state.get()).packet as *mut P
     }
@@ -173,8 +203,8 @@ impl<T: Send, P: Send> Consumer<T, P> {
 mod tests {
     use prelude::*;
 
-    use task;
     use super::{queue, Data, Empty, Inconsistent};
+    use native;
 
     #[test]
     fn test_full() {
@@ -192,14 +222,17 @@ mod tests {
             Empty => {}
             Inconsistent | Data(..) => fail!()
         }
+        let (port, chan) = SharedChan::new();
 
         for _ in range(0, nthreads) {
             let q = p.clone();
-            do task::spawn_sched(task::SingleThreaded) {
+            let chan = chan.clone();
+            do native::task::spawn {
                 let mut q = q;
                 for i in range(0, nmsgs) {
                     q.push(i);
                 }
+                chan.send(());
             }
         }
 
@@ -210,6 +243,9 @@ mod tests {
                 Data(_) => { i += 1 }
             }
         }
+        for _ in range(0, nthreads) {
+            port.recv();
+        }
     }
 }
 
diff --git a/src/libstd/rt/spsc_queue.rs b/src/libstd/sync/spsc_queue.rs
index f14533d726a..6f1b887c271 100644
--- a/src/libstd/rt/spsc_queue.rs
+++ b/src/libstd/sync/spsc_queue.rs
@@ -26,12 +26,20 @@
  */
 
 // http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue
+
+//! A single-producer single-consumer concurrent queue
+//!
+//! This module contains the implementation of an SPSC queue which can be used
+//! concurrently between two tasks. This data structure is safe to use and
+//! enforces the semantics that there is one pusher and one popper.
+
 use cast;
 use kinds::Send;
 use ops::Drop;
 use option::{Some, None, Option};
-use unstable::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
-use unstable::sync::UnsafeArc;
+use ptr::RawPtr;
+use sync::arc::UnsafeArc;
+use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release};
 
 // Node within the linked list queue of messages to send
 struct Node<T> {
@@ -64,14 +72,34 @@ struct State<T, P> {
     packet: P,
 }
 
+/// Producer half of this queue. This handle is used to push data to the
+/// consumer.
 pub struct Producer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
 
+/// Consumer half of this queue. This handle is used to receive data from the
+/// producer.
 pub struct Consumer<T, P> {
     priv state: UnsafeArc<State<T, P>>,
 }
 
+/// Creates a new queue. The producer returned is connected to the consumer to
+/// push all data to the consumer.
+///
+/// # Arguments
+///
+///   * `bound` - This queue implementation is implemented with a linked list,
+///               and this means that a push is always a malloc. In order to
+///               amortize this cost, an internal cache of nodes is maintained
+///               to prevent a malloc from always being necessary. This bound is
+///               the limit on the size of the cache (if desired). If the value
+///               is 0, then the cache has no bound. Otherwise, the cache will
+///               never grow larger than `bound` (although the queue itself
+///               could be much larger).
+///
+///   * `p` - This is the user-defined packet of data which will also be shared
+///           between the producer and consumer.
 pub fn queue<T: Send, P: Send>(bound: uint,
                                p: P) -> (Consumer<T, P>, Producer<T, P>)
 {
@@ -105,21 +133,31 @@ impl<T: Send> Node<T> {
 }
 
 impl<T: Send, P: Send> Producer<T, P> {
+    /// Pushes data onto the queue
     pub fn push(&mut self, t: T) {
         unsafe { (*self.state.get()).push(t) }
     }
+    /// Tests whether the queue is empty. Note that if this function returns
+    /// `false`, the return value is significant, but if the return value is
+    /// `true` then almost no meaning can be attached to the return value.
     pub fn is_empty(&self) -> bool {
         unsafe { (*self.state.get()).is_empty() }
     }
+    /// Acquires an unsafe pointer to the underlying user-defined packet. Note
+    /// that care must be taken to ensure that the queue outlives the usage of
+    /// the packet (because it is an unsafe pointer).
     pub unsafe fn packet(&self) -> *mut P {
         &mut (*self.state.get()).packet as *mut P
     }
 }
 
 impl<T: Send, P: Send> Consumer<T, P> {
+    /// Pops some data from this queue, returning `None` when the queue is
+    /// empty.
     pub fn pop(&mut self) -> Option<T> {
         unsafe { (*self.state.get()).pop() }
     }
+    /// Same function as the producer's `packet` method.
     pub unsafe fn packet(&self) -> *mut P {
         &mut (*self.state.get()).packet as *mut P
     }
@@ -230,7 +268,7 @@ impl<T: Send, P: Send> Drop for State<T, P> {
 mod test {
     use prelude::*;
     use super::queue;
-    use task;
+    use native;
 
     #[test]
     fn smoke() {
@@ -276,7 +314,8 @@ mod test {
 
         fn stress_bound(bound: uint) {
             let (c, mut p) = queue(bound, ());
-            do task::spawn_sched(task::SingleThreaded) {
+            let (port, chan) = Chan::new();
+            do native::task::spawn {
                 let mut c = c;
                 for _ in range(0, 100000) {
                     loop {
@@ -287,10 +326,12 @@ mod test {
                         }
                     }
                 }
+                chan.send(());
             }
             for _ in range(0, 100000) {
                 p.push(1);
             }
+            port.recv();
         }
     }
 }
diff --git a/src/libstd/task/mod.rs b/src/libstd/task.rs
index 3310dddc327..2f0f9bf64af 100644
--- a/src/libstd/task/mod.rs
+++ b/src/libstd/task.rs
@@ -53,22 +53,22 @@
 
 #[allow(missing_doc)];
 
-use prelude::*;
-
+use any::Any;
 use comm::{Chan, Port};
+use kinds::Send;
+use option::{None, Some, Option};
 use result::{Result, Ok, Err};
-use rt::in_green_task_context;
 use rt::local::Local;
+use rt::task::Task;
 use send_str::{SendStr, IntoSendStr};
+use str::Str;
 use util;
 
-#[cfg(test)] use any::Any;
+#[cfg(test)] use any::{AnyOwnExt, AnyRefExt};
 #[cfg(test)] use comm::SharedChan;
 #[cfg(test)] use ptr;
 #[cfg(test)] use result;
 
-pub mod spawn;
-
 /// Indicates the manner in which a task exited.
 ///
 /// A task that completes without failing is considered to exit successfully.
@@ -80,27 +80,6 @@ pub mod spawn;
 /// children tasks complete, recommend using a result future.
 pub type TaskResult = Result<(), ~Any>;
 
-/// Scheduler modes
-#[deriving(Eq)]
-pub enum SchedMode {
-    /// Run task on the default scheduler
-    DefaultScheduler,
-    /// All tasks run in the same OS thread
-    SingleThreaded,
-}
-
-/**
- * Scheduler configuration options
- *
- * # Fields
- *
- * * sched_mode - The operating mode of the scheduler
- *
- */
-pub struct SchedOpts {
-    priv mode: SchedMode,
-}
-
 /**
  * Task configuration options
  *
@@ -121,10 +100,9 @@ pub struct SchedOpts {
  *           scheduler other tasks will be impeded or even blocked indefinitely.
  */
 pub struct TaskOpts {
-    priv watched: bool,
-    priv notify_chan: Option<Chan<TaskResult>>,
+    watched: bool,
+    notify_chan: Option<Chan<TaskResult>>,
     name: Option<SendStr>,
-    sched: SchedOpts,
     stack_size: Option<uint>
 }
 
@@ -153,7 +131,7 @@ pub struct TaskBuilder {
  */
 pub fn task() -> TaskBuilder {
     TaskBuilder {
-        opts: default_task_opts(),
+        opts: TaskOpts::new(),
         gen_body: None,
         can_not_copy: None,
     }
@@ -169,7 +147,6 @@ impl TaskBuilder {
                 watched: self.opts.watched,
                 notify_chan: notify_chan,
                 name: name,
-                sched: self.opts.sched,
                 stack_size: self.opts.stack_size
             },
             gen_body: gen_body,
@@ -229,11 +206,6 @@ impl TaskBuilder {
         self.opts.name = Some(name.into_send_str());
     }
 
-    /// Configure a custom scheduler mode for the task.
-    pub fn sched_mode(&mut self, mode: SchedMode) {
-        self.opts.sched.mode = mode;
-    }
-
     /**
      * Add a wrapper to the body of the spawned task.
      *
@@ -285,7 +257,6 @@ impl TaskBuilder {
             watched: x.opts.watched,
             notify_chan: notify_chan,
             name: name,
-            sched: x.opts.sched,
             stack_size: x.opts.stack_size
         };
         let f = match gen_body {
@@ -296,7 +267,9 @@ impl TaskBuilder {
                 f
             }
         };
-        spawn::spawn_raw(opts, f);
+
+        let t: ~Task = Local::take();
+        t.spawn_sibling(opts, f);
     }
 
     /**
@@ -328,25 +301,23 @@ impl TaskBuilder {
     }
 }
 
-
 /* Task construction */
 
-pub fn default_task_opts() -> TaskOpts {
-    /*!
-     * The default task options
-     *
-     * By default all tasks are supervised by their parent, are spawned
-     * into the same scheduler, and do not post lifecycle notifications.
-     */
-
-    TaskOpts {
-        watched: true,
-        notify_chan: None,
-        name: None,
-        sched: SchedOpts {
-            mode: DefaultScheduler,
-        },
-        stack_size: None
+impl TaskOpts {
+    pub fn new() -> TaskOpts {
+        /*!
+         * The default task options
+         *
+         * By default all tasks are supervised by their parent, are spawned
+         * into the same scheduler, and do not post lifecycle notifications.
+         */
+
+        TaskOpts {
+            watched: true,
+            notify_chan: None,
+            name: None,
+            stack_size: None
+        }
     }
 }
 
@@ -363,24 +334,6 @@ pub fn spawn(f: proc()) {
     task.spawn(f)
 }
 
-pub fn spawn_sched(mode: SchedMode, f: proc()) {
-    /*!
-     * Creates a new task on a new or existing scheduler.
-     *
-     * When there are no more tasks to execute the
-     * scheduler terminates.
-     *
-     * # Failure
-     *
-     * In manual threads mode the number of threads requested must be
-     * greater than zero.
-     */
-
-    let mut task = task();
-    task.sched_mode(mode);
-    task.spawn(f)
-}
-
 pub fn try<T:Send>(f: proc() -> T) -> Result<T, ~Any> {
     /*!
      * Execute a function in another task and return either the return value
@@ -400,14 +353,10 @@ pub fn try<T:Send>(f: proc() -> T) -> Result<T, ~Any> {
 pub fn with_task_name<U>(blk: |Option<&str>| -> U) -> U {
     use rt::task::Task;
 
-    if in_green_task_context() {
-        let mut task = Local::borrow(None::<Task>);
-        match task.get().name {
-            Some(ref name) => blk(Some(name.as_slice())),
-            None => blk(None)
-        }
-    } else {
-        fail!("no task name exists in non-green task context")
+    let mut task = Local::borrow(None::<Task>);
+    match task.get().name {
+        Some(ref name) => blk(Some(name.as_slice())),
+        None => blk(None)
     }
 }
 
@@ -415,11 +364,10 @@ pub fn deschedule() {
     //! Yield control to the task scheduler
 
     use rt::local::Local;
-    use rt::sched::Scheduler;
 
     // FIXME(#7544): Optimize this, since we know we won't block.
-    let sched: ~Scheduler = Local::take();
-    sched.yield_now();
+    let task: ~Task = Local::take();
+    task.yield_now();
 }
 
 pub fn failing() -> bool {
@@ -428,7 +376,7 @@ pub fn failing() -> bool {
     use rt::task::Task;
 
     let mut local = Local::borrow(None::<Task>);
-    local.get().unwinder.unwinding
+    local.get().unwinder.unwinding()
 }
 
 // The following 8 tests test the following 2^3 combinations:
@@ -439,59 +387,43 @@ pub fn failing() -> bool {
 
 #[test]
 fn test_unnamed_task() {
-    use rt::test::run_in_uv_task;
-
-    do run_in_uv_task {
-        do spawn {
-            with_task_name(|name| {
-                assert!(name.is_none());
-            })
-        }
+    do spawn {
+        with_task_name(|name| {
+            assert!(name.is_none());
+        })
     }
 }
 
 #[test]
 fn test_owned_named_task() {
-    use rt::test::run_in_uv_task;
-
-    do run_in_uv_task {
-        let mut t = task();
-        t.name(~"ada lovelace");
-        do t.spawn {
-            with_task_name(|name| {
-                assert!(name.unwrap() == "ada lovelace");
-            })
-        }
+    let mut t = task();
+    t.name(~"ada lovelace");
+    do t.spawn {
+        with_task_name(|name| {
+            assert!(name.unwrap() == "ada lovelace");
+        })
     }
 }
 
 #[test]
 fn test_static_named_task() {
-    use rt::test::run_in_uv_task;
-
-    do run_in_uv_task {
-        let mut t = task();
-        t.name("ada lovelace");
-        do t.spawn {
-            with_task_name(|name| {
-                assert!(name.unwrap() == "ada lovelace");
-            })
-        }
+    let mut t = task();
+    t.name("ada lovelace");
+    do t.spawn {
+        with_task_name(|name| {
+            assert!(name.unwrap() == "ada lovelace");
+        })
     }
 }
 
 #[test]
 fn test_send_named_task() {
-    use rt::test::run_in_uv_task;
-
-    do run_in_uv_task {
-        let mut t = task();
-        t.name("ada lovelace".into_send_str());
-        do t.spawn {
-            with_task_name(|name| {
-                assert!(name.unwrap() == "ada lovelace");
-            })
-        }
+    let mut t = task();
+    t.name("ada lovelace".into_send_str());
+    do t.spawn {
+        with_task_name(|name| {
+            assert!(name.unwrap() == "ada lovelace");
+        })
     }
 }
 
@@ -562,28 +494,19 @@ fn test_try_fail() {
     }
 }
 
-#[cfg(test)]
-fn get_sched_id() -> int {
-    use rt::sched::Scheduler;
-    let mut sched = Local::borrow(None::<Scheduler>);
-    sched.get().sched_id() as int
-}
-
 #[test]
 fn test_spawn_sched() {
+    use clone::Clone;
+
     let (po, ch) = SharedChan::new();
 
     fn f(i: int, ch: SharedChan<()>) {
-        let parent_sched_id = get_sched_id();
-
-        do spawn_sched(SingleThreaded) {
-            let child_sched_id = get_sched_id();
-            assert!(parent_sched_id != child_sched_id);
-
+        let ch = ch.clone();
+        do spawn {
             if (i == 0) {
                 ch.send(());
             } else {
-                f(i - 1, ch.clone());
+                f(i - 1, ch);
             }
         };
 
@@ -596,16 +519,9 @@ fn test_spawn_sched() {
 fn test_spawn_sched_childs_on_default_sched() {
     let (po, ch) = Chan::new();
 
-    // Assuming tests run on the default scheduler
-    let default_id = get_sched_id();
-
-    do spawn_sched(SingleThreaded) {
+    do spawn {
         let ch = ch;
-        let parent_sched_id = get_sched_id();
         do spawn {
-            let child_sched_id = get_sched_id();
-            assert!(parent_sched_id != child_sched_id);
-            assert_eq!(child_sched_id, default_id);
             ch.send(());
         };
     };
@@ -613,65 +529,6 @@ fn test_spawn_sched_childs_on_default_sched() {
     po.recv();
 }
 
-#[test]
-fn test_spawn_sched_blocking() {
-    use unstable::mutex::Mutex;
-
-    unsafe {
-
-        // Testing that a task in one scheduler can block in foreign code
-        // without affecting other schedulers
-        20u.times(|| {
-            let (start_po, start_ch) = Chan::new();
-            let (fin_po, fin_ch) = Chan::new();
-
-            let mut lock = Mutex::new();
-            let lock2 = lock.clone();
-
-            do spawn_sched(SingleThreaded) {
-                let mut lock = lock2;
-                lock.lock();
-
-                start_ch.send(());
-
-                // Block the scheduler thread
-                lock.wait();
-                lock.unlock();
-
-                fin_ch.send(());
-            };
-
-            // Wait until the other task has its lock
-            start_po.recv();
-
-            fn pingpong(po: &Port<int>, ch: &Chan<int>) {
-                let mut val = 20;
-                while val > 0 {
-                    val = po.recv();
-                    ch.try_send(val - 1);
-                }
-            }
-
-            let (setup_po, setup_ch) = Chan::new();
-            let (parent_po, parent_ch) = Chan::new();
-            do spawn {
-                let (child_po, child_ch) = Chan::new();
-                setup_ch.send(child_ch);
-                pingpong(&child_po, &parent_ch);
-            };
-
-            let child_ch = setup_po.recv();
-            child_ch.send(20);
-            pingpong(&parent_po, &child_ch);
-            lock.lock();
-            lock.signal();
-            lock.unlock();
-            fin_po.recv();
-            lock.destroy();
-        })
-    }
-}
-
 #[cfg(test)]
 fn avoid_copying_the_body(spawnfn: |v: proc()|) {
     let (p, ch) = Chan::<uint>::new();
@@ -735,11 +592,7 @@ fn test_child_doesnt_ref_parent() {
 
 #[test]
 fn test_simple_newsched_spawn() {
-    use rt::test::run_in_uv_task;
-
-    do run_in_uv_task {
-        spawn(proc()())
-    }
+    spawn(proc()())
 }
 
 #[test]
diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs
deleted file mode 100644
index 1148774020a..00000000000
--- a/src/libstd/task/spawn.rs
+++ /dev/null
@@ -1,233 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-/*!**************************************************************************
- *
- * WARNING: linked failure has been removed since this doc comment was written,
- *          but it was so pretty that I didn't want to remove it.
- *
- * Spawning & linked failure
- *
- * Several data structures are involved in task management to allow properly
- * propagating failure across linked/supervised tasks.
- *
- * (1) The "taskgroup_arc" is an unsafe::exclusive which contains a hashset of
- *     all tasks that are part of the group. Some tasks are 'members', which
- *     means if they fail, they will kill everybody else in the taskgroup.
- *     Other tasks are 'descendants', which means they will not kill tasks
- *     from this group, but can be killed by failing members.
- *
- *     A new one of these is created each spawn_linked or spawn_supervised.
- *
- * (2) The "taskgroup" is a per-task control structure that tracks a task's
- *     spawn configuration. It contains a reference to its taskgroup_arc, a
- *     reference to its node in the ancestor list (below), and an optionally
- *     configured notification port. These are stored in TLS.
- *
- * (3) The "ancestor_list" is a cons-style list of unsafe::exclusives which
- *     tracks 'generations' of taskgroups -- a group's ancestors are groups
- *     which (directly or transitively) spawn_supervised-ed them. Each task
- *     is recorded in the 'descendants' of each of its ancestor groups.
- *
- *     Spawning a supervised task is O(n) in the number of generations still
- *     alive, and exiting (by success or failure) that task is also O(n).
- *
- * This diagram depicts the references between these data structures:
- *
- *          linked_________________________________
- *        ___/                   _________         \___
- *       /   \                  | group X |        /   \
- *      (  A  ) - - - - - - - > | {A,B} {}|< - - -(  B  )
- *       \___/                  |_________|        \___/
- *      unlinked
- *         |      __ (nil)
- *         |      //|                         The following code causes this:
- *         |__   //   /\         _________
- *        /   \ //    ||        | group Y |     fn taskA() {
- *       (  C  )- - - ||- - - > |{C} {D,E}|         spawn(taskB);
- *        \___/      /  \=====> |_________|         spawn_unlinked(taskC);
- *      supervise   /gen \                          ...
- *         |    __  \ 00 /                      }
- *         |    //|  \__/                       fn taskB() { ... }
- *         |__ //     /\         _________      fn taskC() {
- *        /   \/      ||        | group Z |         spawn_supervised(taskD);
- *       (  D  )- - - ||- - - > | {D} {E} |         ...
- *        \___/      /  \=====> |_________|     }
- *      supervise   /gen \                      fn taskD() {
- *         |    __  \ 01 /                          spawn_supervised(taskE);
- *         |    //|  \__/                           ...
- *         |__ //                _________      }
- *        /   \/                | group W |     fn taskE() { ... }
- *       (  E  )- - - - - - - > | {E}  {} |
- *        \___/                 |_________|
- *
- *        "tcb"               "taskgroup_arc"
- *             "ancestor_list"
- *
- ****************************************************************************/
-
-#[doc(hidden)];
-
-use prelude::*;
-
-use comm::Chan;
-use rt::local::Local;
-use rt::sched::{Scheduler, Shutdown, TaskFromFriend};
-use rt::task::{Task, Sched};
-use rt::thread::Thread;
-use rt::{in_green_task_context, new_event_loop};
-use task::{SingleThreaded, TaskOpts, TaskResult};
-
-#[cfg(test)] use task::default_task_opts;
-#[cfg(test)] use task;
-
-pub fn spawn_raw(mut opts: TaskOpts, f: proc()) {
-    assert!(in_green_task_context());
-
-    let mut task = if opts.sched.mode != SingleThreaded {
-        if opts.watched {
-            Task::build_child(opts.stack_size, f)
-        } else {
-            Task::build_root(opts.stack_size, f)
-        }
-    } else {
-        unsafe {
-            // Creating a 1:1 task:thread ...
-            let sched: *mut Scheduler = Local::unsafe_borrow();
-            let sched_handle = (*sched).make_handle();
-
-            // Since this is a 1:1 scheduler we create a queue not in
-            // the stealee set. The run_anything flag is set false
-            // which will disable stealing.
-            let (worker, _stealer) = (*sched).work_queue.pool().deque();
-
-            // Create a new scheduler to hold the new task
-            let mut new_sched = ~Scheduler::new_special(new_event_loop(),
-                                                        worker,
-                                                        (*sched).work_queues.clone(),
-                                                        (*sched).sleeper_list.clone(),
-                                                        false,
-                                                        Some(sched_handle));
-            let mut new_sched_handle = new_sched.make_handle();
-
-            // Allow the scheduler to exit when the pinned task exits
-            new_sched_handle.send(Shutdown);
-
-            // Pin the new task to the new scheduler
-            let new_task = if opts.watched {
-                Task::build_homed_child(opts.stack_size, f, Sched(new_sched_handle))
-            } else {
-                Task::build_homed_root(opts.stack_size, f, Sched(new_sched_handle))
-            };
-
-            // Create a task that will later be used to join with the new scheduler
-            // thread when it is ready to terminate
-            let (thread_port, thread_chan) = Chan::new();
-            let join_task = do Task::build_child(None) {
-                debug!("running join task");
-                let thread: Thread<()> = thread_port.recv();
-                thread.join();
-            };
-
-            // Put the scheduler into another thread
-            let orig_sched_handle = (*sched).make_handle();
-
-            let new_sched = new_sched;
-            let thread = do Thread::start {
-                let mut new_sched = new_sched;
-                let mut orig_sched_handle = orig_sched_handle;
-
-                let bootstrap_task = ~do Task::new_root(&mut new_sched.stack_pool, None) || {
-                    debug!("boostrapping a 1:1 scheduler");
-                };
-                new_sched.bootstrap(bootstrap_task);
-
-                // Now tell the original scheduler to join with this thread
-                // by scheduling a thread-joining task on the original scheduler
-                orig_sched_handle.send(TaskFromFriend(join_task));
-
-                // NB: We can't simply send a message from here to another task
-                // because this code isn't running in a task and message passing doesn't
-                // work outside of tasks. Hence we're sending a scheduler message
-                // to execute a new task directly to a scheduler.
-            };
-
-            // Give the thread handle to the join task
-            thread_chan.send(thread);
-
-            // When this task is enqueued on the current scheduler it will then get
-            // forwarded to the scheduler to which it is pinned
-            new_task
-        }
-    };
-
-    if opts.notify_chan.is_some() {
-        let notify_chan = opts.notify_chan.take_unwrap();
-        let on_exit: proc(TaskResult) = proc(task_result) {
-            notify_chan.try_send(task_result);
-        };
-        task.death.on_exit = Some(on_exit);
-    }
-
-    task.name = opts.name.take();
-    debug!("spawn calling run_task");
-    Scheduler::run_task(task);
-
-}
-
-#[test]
-fn test_spawn_raw_simple() {
-    let (po, ch) = Chan::new();
-    do spawn_raw(default_task_opts()) {
-        ch.send(());
-    }
-    po.recv();
-}
-
-#[test]
-fn test_spawn_raw_unsupervise() {
-    let opts = task::TaskOpts {
-        watched: false,
-        notify_chan: None,
-        .. default_task_opts()
-    };
-    do spawn_raw(opts) {
-        fail!();
-    }
-}
-
-#[test]
-fn test_spawn_raw_notify_success() {
-    let (notify_po, notify_ch) = Chan::new();
-
-    let opts = task::TaskOpts {
-        notify_chan: Some(notify_ch),
-        .. default_task_opts()
-    };
-    do spawn_raw(opts) {
-    }
-    assert!(notify_po.recv().is_ok());
-}
-
-#[test]
-fn test_spawn_raw_notify_failure() {
-    // New bindings for these
-    let (notify_po, notify_ch) = Chan::new();
-
-    let opts = task::TaskOpts {
-        watched: false,
-        notify_chan: Some(notify_ch),
-        .. default_task_opts()
-    };
-    do spawn_raw(opts) {
-        fail!();
-    }
-    assert!(notify_po.recv().is_err());
-}
diff --git a/src/libstd/unstable/dynamic_lib.rs b/src/libstd/unstable/dynamic_lib.rs
index 03b25fbd044..0569fe32c58 100644
--- a/src/libstd/unstable/dynamic_lib.rs
+++ b/src/libstd/unstable/dynamic_lib.rs
@@ -140,7 +140,6 @@ pub mod dl {
     use path;
     use ptr;
     use str;
-    use unstable::sync::atomic;
     use result::*;
 
     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@@ -158,11 +157,7 @@ pub mod dl {
         static mut lock: Mutex = MUTEX_INIT;
         unsafe {
             // dlerror isn't thread safe, so we need to lock around this entire
-            // sequence. `atomic` asserts that we don't do anything that
-            // would cause this task to be descheduled, which could deadlock
-            // the scheduler if it happens while the lock is held.
-            // FIXME #9105 use a Rust mutex instead of C++ mutexes.
-            let _guard = atomic();
+            // sequence
             lock.lock();
             let _old_error = dlerror();
 
@@ -208,7 +203,6 @@ pub mod dl {
     use libc;
     use path;
     use ptr;
-    use unstable::sync::atomic;
     use result::*;
 
     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@@ -225,7 +219,6 @@ pub mod dl {
 
     pub fn check_for_errors_in<T>(f: || -> T) -> Result<T, ~str> {
         unsafe {
-            let _guard = atomic();
             SetLastError(0);
 
             let result = f();
diff --git a/src/libstd/unstable/lang.rs b/src/libstd/unstable/lang.rs
index 06f9ba65ae7..e7e8cec9d5f 100644
--- a/src/libstd/unstable/lang.rs
+++ b/src/libstd/unstable/lang.rs
@@ -11,15 +11,13 @@
 //! Runtime calls emitted by the compiler.
 
 use c_str::ToCStr;
-use cast::transmute;
 use libc::{c_char, size_t, uintptr_t};
-use rt::task;
 use rt::borrowck;
 
 #[cold]
 #[lang="fail_"]
 pub fn fail_(expr: *c_char, file: *c_char, line: size_t) -> ! {
-    task::begin_unwind_raw(expr, file, line);
+    ::rt::begin_unwind_raw(expr, file, line);
 }
 
 #[cold]
@@ -81,15 +79,3 @@ pub unsafe fn check_not_borrowed(a: *u8,
                                  line: size_t) {
     borrowck::check_not_borrowed(a, file, line)
 }
-
-#[lang="start"]
-pub fn start(main: *u8, argc: int, argv: **c_char) -> int {
-    use rt;
-
-    unsafe {
-        return do rt::start(argc, argv as **u8) {
-            let main: extern "Rust" fn() = transmute(main);
-            main();
-        };
-    }
-}
diff --git a/src/libstd/unstable/mod.rs b/src/libstd/unstable/mod.rs
index 043d99eb1b8..f4573785996 100644
--- a/src/libstd/unstable/mod.rs
+++ b/src/libstd/unstable/mod.rs
@@ -22,8 +22,8 @@ pub mod simd;
 pub mod lang;
 pub mod sync;
 pub mod mutex;
-pub mod atomics;
 pub mod raw;
+pub mod stack;
 
 /**
 
diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs
index 3e7a861b385..5b2fac8e74e 100644
--- a/src/libstd/unstable/mutex.rs
+++ b/src/libstd/unstable/mutex.rs
@@ -48,7 +48,7 @@
 #[allow(non_camel_case_types)];
 
 use libc::c_void;
-use unstable::atomics;
+use sync::atomics;
 
 pub struct Mutex {
     // pointers for the lock/cond handles, atomically updated
@@ -333,12 +333,12 @@ mod test {
     fn somke_cond() {
         static mut lock: Mutex = MUTEX_INIT;
         unsafe {
+            lock.lock();
             let t = do Thread::start {
                 lock.lock();
                 lock.signal();
                 lock.unlock();
             };
-            lock.lock();
             lock.wait();
             lock.unlock();
             t.join();
diff --git a/src/libstd/unstable/stack.rs b/src/libstd/unstable/stack.rs
new file mode 100644
index 00000000000..d6cd690eaa9
--- /dev/null
+++ b/src/libstd/unstable/stack.rs
@@ -0,0 +1,275 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Rust stack-limit management
+//!
+//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
+//! overflow for rust tasks. In this scheme, the prologue of all functions are
+//! preceded with a check to see whether the current stack limits are being
+//! exceeded.
+//!
+//! This module provides the functionality necessary in order to manage these
+//! stack limits (which are stored in platform-specific locations). The
+//! functions here are used at the borders of the task lifetime in order to
+//! manage these limits.
+//!
+//! This function is an unstable module because this scheme for stack overflow
+//! detection is not guaranteed to continue in the future. Usage of this module
+//! is discouraged unless absolutely necessary.
+
+static RED_ZONE: uint = 20 * 1024;
+
+/// This function is invoked from rust's current __morestack function. Segmented
+/// stacks are currently not enabled as segmented stacks, but rather one giant
+/// stack segment. This means that whenever we run out of stack, we want to
+/// truly consider it to be stack overflow rather than allocating a new stack.
+#[no_mangle]      // - this is called from C code
+#[no_split_stack] // - it would be sad for this function to trigger __morestack
+#[doc(hidden)]    // - Function must be `pub` to get exported, but it's
+                  //   irrelevant for documentation purposes.
+#[cfg(not(test))] // in testing, use the original libstd's version
+pub extern "C" fn rust_stack_exhausted() {
+    use rt::task::Task;
+    use option::None;
+    use rt::local::Local;
+    use unstable::intrinsics;
+
+    unsafe {
+        // We're calling this function because the stack just ran out. We need
+        // to call some other rust functions, but if we invoke the functions
+        // right now it'll just trigger this handler being called again. In
+        // order to alleviate this, we move the stack limit to be inside of the
+        // red zone that was allocated for exactly this reason.
+        let limit = get_sp_limit();
+        record_sp_limit(limit - RED_ZONE / 2);
+
+        // This probably isn't the best course of action. Ideally one would want
+        // to unwind the stack here instead of just aborting the entire process.
+        // This is a tricky problem, however. There's a few things which need to
+        // be considered:
+        //
+        //  1. We're here because of a stack overflow, yet unwinding will run
+        //     destructors and hence arbitrary code. What if that code overflows
+        //     the stack? One possibility is to use the above allocation of an
+        //     extra 10k to hope that we don't hit the limit, and if we do then
+        //     abort the whole program. Not the best, but kind of hard to deal
+        //     with unless we want to switch stacks.
+        //
+        //  2. LLVM will optimize functions based on whether they can unwind or
+        //     not. It will flag functions with 'nounwind' if it believes that
+        //     the function cannot trigger unwinding, but if we do unwind on
+        //     stack overflow then it means that we could unwind in any function
+        //     anywhere. We would have to make sure that LLVM only places the
+        //     nounwind flag on functions which don't call any other functions.
+        //
+        //  3. The function that overflowed may have owned arguments. These
+        //     arguments need to have their destructors run, but we haven't even
+        //     begun executing the function yet, so unwinding will not run the
+        //     any landing pads for these functions. If this is ignored, then
+        //     the arguments will just be leaked.
+        //
+        // Exactly what to do here is a very delicate topic, and is possibly
+        // still up in the air for what exactly to do. Some relevant issues:
+        //
+        //  #3555 - out-of-stack failure leaks arguments
+        //  #3695 - should there be a stack limit?
+        //  #9855 - possible strategies which could be taken
+        //  #9854 - unwinding on windows through __morestack has never worked
+        //  #2361 - possible implementation of not using landing pads
+
+        let mut task = Local::borrow(None::<Task>);
+        let n = task.get().name.as_ref()
+                    .map(|n| n.as_slice()).unwrap_or("<unnamed>");
+
+        // See the message below for why this is not emitted to the
+        // task's logger. This has the additional conundrum of the
+        // logger may not be initialized just yet, meaning that an FFI
+        // call would happen to initialized it (calling out to libuv),
+        // and the FFI call needs 2MB of stack when we just ran out.
+        println!("task '{}' has overflowed its stack", n);
+
+        intrinsics::abort();
+    }
+}
+
+#[inline(always)]
+pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+    // When the old runtime had segmented stacks, it used a calculation that was
+    // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
+    // symbol resolution, llvm function calls, etc. In theory this red zone
+    // value is 0, but it matters far less when we have gigantic stacks because
+    // we don't need to be so exact about our stack budget. The "fudge factor"
+    // was because LLVM doesn't emit a stack check for functions < 256 bytes in
+    // size. Again though, we have giant stacks, so we round all these
+    // calculations up to the nice round number of 20k.
+    record_sp_limit(stack_lo + RED_ZONE);
+
+    return target_record_stack_bounds(stack_lo, stack_hi);
+
+    #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)]
+    unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
+    #[cfg(windows, target_arch = "x86_64")] #[inline(always)]
+    unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
+        // Windows compiles C functions which may check the stack bounds. This
+        // means that if we want to perform valid FFI on windows, then we need
+        // to ensure that the stack bounds are what they truly are for this
+        // task. More info can be found at:
+        //   https://github.com/mozilla/rust/issues/3445#issuecomment-26114839
+        //
+        // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
+        asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
+        asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
+    }
+}
+
+/// Records the current limit of the stack as specified by `end`.
+///
+/// This is stored in an OS-dependent location, likely inside of the thread
+/// local storage. The location that the limit is stored is a pre-ordained
+/// location because it's where LLVM has emitted code to check.
+///
+/// Note that this cannot be called under normal circumstances. This function is
+/// changing the stack limit, so upon returning any further function calls will
+/// possibly be triggering the morestack logic if you're not careful.
+///
+/// Also note that this and all of the inside functions are all flagged as
+/// "inline(always)" because they're messing around with the stack limits.  This
+/// would be unfortunate for the functions themselves to trigger a morestack
+/// invocation (if they were an actual function call).
+#[inline(always)]
+pub unsafe fn record_sp_limit(limit: uint) {
+    return target_record_sp_limit(limit);
+
+    // x86-64
+    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $$0x60+90*8, %rsi
+              movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
+    }
+    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
+        // store this inside of the "arbitrary data slot", but double the size
+        // because this is 64 bit instead of 32 bit
+        asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
+    }
+
+    // x86
+    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movl $$0x48+90*4, %eax
+              movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
+    }
+    #[cfg(target_arch = "x86", target_os = "linux")]
+    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
+    }
+    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block
+        // store this inside of the "arbitrary data slot"
+        asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile")
+    }
+
+    // mips, arm - Some brave soul can port these to inline asm, but it's over
+    //             my head personally
+    #[cfg(target_arch = "mips")]
+    #[cfg(target_arch = "arm")] #[inline(always)]
+    unsafe fn target_record_sp_limit(limit: uint) {
+        use libc::c_void;
+        return record_sp_limit(limit as *c_void);
+        extern {
+            fn record_sp_limit(limit: *c_void);
+        }
+    }
+}
+
+/// The counterpart of the function above, this function will fetch the current
+/// stack limit stored in TLS.
+///
+/// Note that all of these functions are meant to be exact counterparts of their
+/// brethren above, except that the operands are reversed.
+///
+/// As with the setter, this function does not have a __morestack header and can
+/// therefore be called in a "we're out of stack" situation.
+#[inline(always)]
+pub unsafe fn get_sp_limit() -> uint {
+    return target_get_sp_limit();
+
+    // x86-64
+    #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq $$0x60+90*8, %rsi
+              movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
+        return limit;
+    }
+    #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+
+    // x86
+    #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movl $$0x48+90*4, %eax
+              movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
+        return limit;
+    }
+    #[cfg(target_arch = "x86", target_os = "linux")]
+    #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+    #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        let limit;
+        asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile");
+        return limit;
+    }
+
+    // mips, arm - Some brave soul can port these to inline asm, but it's over
+    //             my head personally
+    #[cfg(target_arch = "mips")]
+    #[cfg(target_arch = "arm")] #[inline(always)]
+    unsafe fn target_get_sp_limit() -> uint {
+        use libc::c_void;
+        return get_sp_limit() as uint;
+        extern {
+            fn get_sp_limit() -> *c_void;
+        }
+    }
+}
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index 50fae1e0239..687efea939b 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -8,353 +8,12 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use cast;
-use comm::{Chan, Port};
-use ptr;
-use option::{Option,Some,None};
-use task;
-use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst};
-use unstable::mutex::Mutex;
-use ops::Drop;
 use clone::Clone;
 use kinds::Send;
-use vec;
-
-/// An atomically reference counted pointer.
-///
-/// Enforces no shared-memory safety.
-//#[unsafe_no_drop_flag] FIXME: #9758
-pub struct UnsafeArc<T> {
-    data: *mut ArcData<T>,
-}
-
-pub enum UnsafeArcUnwrap<T> {
-    UnsafeArcSelf(UnsafeArc<T>),
-    UnsafeArcT(T)
-}
-
-#[cfg(test)]
-impl<T> UnsafeArcUnwrap<T> {
-    fn expect_t(self, msg: &'static str) -> T {
-        match self {
-            UnsafeArcSelf(_) => fail!(msg),
-            UnsafeArcT(t) => t
-        }
-    }
-
-    fn is_self(&self) -> bool {
-        match *self {
-            UnsafeArcSelf(_) => true,
-            UnsafeArcT(_) => false
-        }
-    }
-}
-
-struct ArcData<T> {
-    count: AtomicUint,
-    // An unwrapper uses this protocol to communicate with the "other" task that
-    // drops the last refcount on an arc. Unfortunately this can't be a proper
-    // pipe protocol because the unwrapper has to access both stages at once.
-    // FIXME(#7544): Maybe use AtomicPtr instead (to avoid xchg in take() later)?
-    unwrapper: AtomicOption<(Chan<()>, Port<bool>)>,
-    // FIXME(#3224) should be able to make this non-option to save memory
-    data: Option<T>,
-}
-
-unsafe fn new_inner<T: Send>(data: T, refcount: uint) -> *mut ArcData<T> {
-    let data = ~ArcData { count: AtomicUint::new(refcount),
-                          unwrapper: AtomicOption::empty(),
-                          data: Some(data) };
-    cast::transmute(data)
-}
-
-/// A helper object used by `UnsafeArc::unwrap`.
-struct ChannelAndDataGuard<T> {
-    channel: Option<Chan<bool>>,
-    data: Option<~ArcData<T>>,
-}
-
-#[unsafe_destructor]
-impl<T> Drop for ChannelAndDataGuard<T> {
-    fn drop(&mut self) {
-        if task::failing() {
-            // Killed during wait. Because this might happen while
-            // someone else still holds a reference, we can't free
-            // the data now; the "other" last refcount will free it.
-            unsafe {
-                let channel = self.channel.take_unwrap();
-                let data = self.data.take_unwrap();
-                channel.send(false);
-                cast::forget(data);
-            }
-        }
-    }
-}
-
-impl<T> ChannelAndDataGuard<T> {
-    fn unwrap(mut self) -> (Chan<bool>, ~ArcData<T>) {
-        (self.channel.take_unwrap(), self.data.take_unwrap())
-    }
-}
-
-impl<T: Send> UnsafeArc<T> {
-    pub fn new(data: T) -> UnsafeArc<T> {
-        unsafe { UnsafeArc { data: new_inner(data, 1) } }
-    }
-
-    /// As new(), but returns an extra pre-cloned handle.
-    pub fn new2(data: T) -> (UnsafeArc<T>, UnsafeArc<T>) {
-        unsafe {
-            let ptr = new_inner(data, 2);
-            (UnsafeArc { data: ptr }, UnsafeArc { data: ptr })
-        }
-    }
-
-    /// As new(), but returns a vector of as many pre-cloned handles as requested.
-    pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc<T>] {
-        unsafe {
-            if num_handles == 0 {
-                ~[] // need to free data here
-            } else {
-                let ptr = new_inner(data, num_handles);
-                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
-            }
-        }
-    }
-
-    /// As newN(), but from an already-existing handle. Uses one xadd.
-    pub fn cloneN(self, num_handles: uint) -> ~[UnsafeArc<T>] {
-        if num_handles == 0 {
-            ~[] // The "num_handles - 1" trick (below) fails in the 0 case.
-        } else {
-            unsafe {
-                // Minus one because we are recycling the given handle's refcount.
-                let old_count = (*self.data).count.fetch_add(num_handles - 1, Acquire);
-                // let old_count = (*self.data).count.fetch_add(num_handles, Acquire);
-                assert!(old_count >= 1);
-                let ptr = self.data;
-                cast::forget(self); // Don't run the destructor on this handle.
-                vec::from_fn(num_handles, |_| UnsafeArc { data: ptr })
-            }
-        }
-    }
-
-    #[inline]
-    pub fn get(&self) -> *mut T {
-        unsafe {
-            assert!((*self.data).count.load(Relaxed) > 0);
-            let r: *mut T = (*self.data).data.get_mut_ref();
-            return r;
-        }
-    }
-
-    #[inline]
-    pub fn get_immut(&self) -> *T {
-        unsafe {
-            assert!((*self.data).count.load(Relaxed) > 0);
-            let r: *T = (*self.data).data.get_ref();
-            return r;
-        }
-    }
-
-    /// Wait until all other handles are dropped, then retrieve the enclosed
-    /// data. See extra::arc::Arc for specific semantics documentation.
-    /// If called when the task is already unkillable, unwrap will unkillably
-    /// block; otherwise, an unwrapping task can be killed by linked failure.
-    pub fn unwrap(self) -> T {
-        unsafe {
-            let mut this = self;
-            // The ~ dtor needs to run if this code succeeds.
-            let mut data: ~ArcData<T> = cast::transmute(this.data);
-            // Set up the unwrap protocol.
-            let (p1,c1) = Chan::new(); // ()
-            let (p2,c2) = Chan::new(); // bool
-            // Try to put our server end in the unwrapper slot.
-            // This needs no barrier -- it's protected by the release barrier on
-            // the xadd, and the acquire+release barrier in the destructor's xadd.
-            if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() {
-                // Got in. Tell this handle's destructor not to run (we are now it).
-                this.data = ptr::mut_null();
-                // Drop our own reference.
-                let old_count = data.count.fetch_sub(1, Release);
-                assert!(old_count >= 1);
-                if old_count == 1 {
-                    // We were the last owner. Can unwrap immediately.
-                    // AtomicOption's destructor will free the server endpoint.
-                    // FIXME(#3224): it should be like this
-                    // let ~ArcData { data: user_data, _ } = data;
-                    // user_data
-                    data.data.take_unwrap()
-                } else {
-                    // The *next* person who sees the refcount hit 0 will wake us.
-                    let c2_and_data = ChannelAndDataGuard {
-                        channel: Some(c2),
-                        data: Some(data),
-                    };
-                    p1.recv();
-                    // Got here. Back in the 'unkillable' without getting killed.
-                    let (c2, data) = c2_and_data.unwrap();
-                    c2.send(true);
-                    // FIXME(#3224): it should be like this
-                    // let ~ArcData { data: user_data, _ } = data;
-                    // user_data
-                    let mut data = data;
-                    data.data.take_unwrap()
-                }
-            } else {
-                // If 'put' returns the server end back to us, we were rejected;
-                // someone else was trying to unwrap. Avoid guaranteed deadlock.
-                cast::forget(data);
-                fail!("Another task is already unwrapping this Arc!");
-            }
-        }
-    }
-
-    /// As unwrap above, but without blocking. Returns 'UnsafeArcSelf(self)' if this is
-    /// not the last reference; 'UnsafeArcT(unwrapped_data)' if so.
-    pub fn try_unwrap(mut self) -> UnsafeArcUnwrap<T> {
-        unsafe {
-            // The ~ dtor needs to run if this code succeeds.
-            let mut data: ~ArcData<T> = cast::transmute(self.data);
-            // This can of course race with anybody else who has a handle, but in
-            // such a case, the returned count will always be at least 2. If we
-            // see 1, no race was possible. All that matters is 1 or not-1.
-            let count = data.count.load(Acquire);
-            assert!(count >= 1);
-            // The more interesting race is one with an unwrapper. They may have
-            // already dropped their count -- but if so, the unwrapper pointer
-            // will have been set first, which the barriers ensure we will see.
-            // (Note: using is_empty(), not take(), to not free the unwrapper.)
-            if count == 1 && data.unwrapper.is_empty(Acquire) {
-                // Tell this handle's destructor not to run (we are now it).
-                self.data = ptr::mut_null();
-                // FIXME(#3224) as above
-                UnsafeArcT(data.data.take_unwrap())
-            } else {
-                cast::forget(data);
-                UnsafeArcSelf(self)
-            }
-        }
-    }
-}
-
-impl<T: Send> Clone for UnsafeArc<T> {
-    fn clone(&self) -> UnsafeArc<T> {
-        unsafe {
-            // This barrier might be unnecessary, but I'm not sure...
-            let old_count = (*self.data).count.fetch_add(1, Acquire);
-            assert!(old_count >= 1);
-            return UnsafeArc { data: self.data };
-        }
-    }
-}
-
-#[unsafe_destructor]
-impl<T> Drop for UnsafeArc<T>{
-    fn drop(&mut self) {
-        unsafe {
-            // Happens when destructing an unwrapper's handle and from `#[unsafe_no_drop_flag]`
-            if self.data.is_null() {
-                return
-            }
-            let mut data: ~ArcData<T> = cast::transmute(self.data);
-            // Must be acquire+release, not just release, to make sure this
-            // doesn't get reordered to after the unwrapper pointer load.
-            let old_count = data.count.fetch_sub(1, SeqCst);
-            assert!(old_count >= 1);
-            if old_count == 1 {
-                // Were we really last, or should we hand off to an
-                // unwrapper? It's safe to not xchg because the unwrapper
-                // will set the unwrap lock *before* dropping his/her
-                // reference. In effect, being here means we're the only
-                // *awake* task with the data.
-                match data.unwrapper.take(Acquire) {
-                    Some(~(message, response)) => {
-                        // Send 'ready' and wait for a response.
-                        message.send(());
-                        // Unkillable wait. Message guaranteed to come.
-                        if response.recv() {
-                            // Other task got the data.
-                            cast::forget(data);
-                        } else {
-                            // Other task was killed. drop glue takes over.
-                        }
-                    }
-                    None => {
-                        // drop glue takes over.
-                    }
-                }
-            } else {
-                cast::forget(data);
-            }
-        }
-    }
-}
-
-
-/****************************************************************************/
-
-pub struct AtomicGuard {
-    on: bool,
-}
-
-impl Drop for AtomicGuard {
-    fn drop(&mut self) {
-        use rt::task::{Task, GreenTask, SchedTask};
-        use rt::local::Local;
-
-        if self.on {
-            unsafe {
-                let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
-                match task_opt {
-                    Some(t) => {
-                        match (*t).task_type {
-                            GreenTask(_) => (*t).death.allow_deschedule(),
-                            SchedTask => {}
-                        }
-                    }
-                    None => {}
-                }
-            }
-        }
-    }
-}
-
-/**
- * Enables a runtime assertion that no operation while the returned guard is
- * live uses scheduler operations (deschedule, recv, spawn, etc). This is for
- * use with pthread mutexes, which may block the entire scheduler thread,
- * rather than just one task, and is hence prone to deadlocks if mixed with
- * descheduling.
- *
- * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
- * synchronization whatsoever. It only makes sense to use for CPU-local issues.
- */
-// FIXME(#8140) should not be pub
-pub unsafe fn atomic() -> AtomicGuard {
-    use rt::task::{Task, GreenTask, SchedTask};
-    use rt::local::Local;
-
-    let task_opt: Option<*mut Task> = Local::try_unsafe_borrow();
-    match task_opt {
-        Some(t) => {
-            match (*t).task_type {
-                GreenTask(_) => {
-                    (*t).death.inhibit_deschedule();
-                    return AtomicGuard {
-                        on: true,
-                    };
-                }
-                SchedTask => {}
-            }
-        }
-        None => {}
-    }
-
-    AtomicGuard {
-        on: false,
-    }
-}
+use ops::Drop;
+use option::{Option,Some,None};
+use sync::arc::UnsafeArc;
+use unstable::mutex::Mutex;
 
 pub struct LittleLock {
     priv l: Mutex,
@@ -496,37 +155,14 @@ impl<T:Send> Exclusive<T> {
             l.wait();
         }
     }
-
-    pub fn unwrap(self) -> T {
-        let Exclusive { x: x } = self;
-        // Someday we might need to unkillably unwrap an Exclusive, but not today.
-        let inner = x.unwrap();
-        let ExData { data: user_data, .. } = inner; // will destroy the LittleLock
-        user_data
-    }
 }
 
 #[cfg(test)]
 mod tests {
     use option::*;
     use prelude::*;
-    use super::{Exclusive, UnsafeArc, atomic};
+    use super::Exclusive;
     use task;
-    use mem::size_of;
-
-    //#[unsafe_no_drop_flag] FIXME: #9758
-    #[ignore]
-    #[test]
-    fn test_size() {
-        assert_eq!(size_of::<UnsafeArc<[int, ..10]>>(), size_of::<*[int, ..10]>());
-    }
-
-    #[test]
-    fn test_atomic() {
-        // NB. The whole runtime will abort on an 'atomic-sleep' violation,
-        // so we can't really test for the converse behaviour.
-        unsafe { let _ = atomic(); } // oughtn't fail
-    }
 
     #[test]
     fn exclusive_new_arc() {
@@ -570,114 +206,4 @@ mod tests {
             x.with(|one| assert_eq!(*one, 1));
         }
     }
-
-    #[test]
-    fn arclike_newN() {
-        // Tests that the many-refcounts-at-once constructors don't leak.
-        let _ = UnsafeArc::new2(~~"hello");
-        let x = UnsafeArc::newN(~~"hello", 0);
-        assert_eq!(x.len(), 0)
-        let x = UnsafeArc::newN(~~"hello", 1);
-        assert_eq!(x.len(), 1)
-        let x = UnsafeArc::newN(~~"hello", 10);
-        assert_eq!(x.len(), 10)
-    }
-
-    #[test]
-    fn arclike_cloneN() {
-        // Tests that the many-refcounts-at-once special-clone doesn't leak.
-        let x = UnsafeArc::new(~~"hello");
-        let x = x.cloneN(0);
-        assert_eq!(x.len(), 0);
-        let x = UnsafeArc::new(~~"hello");
-        let x = x.cloneN(1);
-        assert_eq!(x.len(), 1);
-        let x = UnsafeArc::new(~~"hello");
-        let x = x.cloneN(10);
-        assert_eq!(x.len(), 10);
-    }
-
-    #[test]
-    fn arclike_unwrap_basic() {
-        let x = UnsafeArc::new(~~"hello");
-        assert!(x.unwrap() == ~~"hello");
-    }
-
-    #[test]
-    fn arclike_try_unwrap() {
-        let x = UnsafeArc::new(~~"hello");
-        assert!(x.try_unwrap().expect_t("try_unwrap failed") == ~~"hello");
-    }
-
-    #[test]
-    fn arclike_try_unwrap_fail() {
-        let x = UnsafeArc::new(~~"hello");
-        let x2 = x.clone();
-        let left_x = x.try_unwrap();
-        assert!(left_x.is_self());
-        drop(left_x);
-        assert!(x2.try_unwrap().expect_t("try_unwrap none") == ~~"hello");
-    }
-
-    #[test]
-    fn arclike_try_unwrap_unwrap_race() {
-        // When an unwrap and a try_unwrap race, the unwrapper should always win.
-        let x = UnsafeArc::new(~~"hello");
-        let x2 = x.clone();
-        let (p,c) = Chan::new();
-        do task::spawn {
-            c.send(());
-            assert!(x2.unwrap() == ~~"hello");
-            c.send(());
-        }
-        p.recv();
-        task::deschedule(); // Try to make the unwrapper get blocked first.
-        let left_x = x.try_unwrap();
-        assert!(left_x.is_self());
-        drop(left_x);
-        p.recv();
-    }
-
-    #[test]
-    fn exclusive_new_unwrap_basic() {
-        // Unlike the above, also tests no double-freeing of the LittleLock.
-        let x = Exclusive::new(~~"hello");
-        assert!(x.unwrap() == ~~"hello");
-    }
-
-    #[test]
-    fn exclusive_new_unwrap_contended() {
-        let x = Exclusive::new(~~"hello");
-        let x2 = x.clone();
-        do task::spawn {
-            unsafe { x2.with(|_hello| ()); }
-            task::deschedule();
-        }
-        assert!(x.unwrap() == ~~"hello");
-
-        // Now try the same thing, but with the child task blocking.
-        let x = Exclusive::new(~~"hello");
-        let x2 = x.clone();
-        let mut builder = task::task();
-        let res = builder.future_result();
-        do builder.spawn {
-            assert!(x2.unwrap() == ~~"hello");
-        }
-        // Have to get rid of our reference before blocking.
-        drop(x);
-        res.recv();
-    }
-
-    #[test] #[should_fail]
-    fn exclusive_new_unwrap_conflict() {
-        let x = Exclusive::new(~~"hello");
-        let x2 = x.clone();
-        let mut builder = task::task();
-        let res = builder.future_result();
-        do builder.spawn {
-            assert!(x2.unwrap() == ~~"hello");
-        }
-        assert!(x.unwrap() == ~~"hello");
-        assert!(res.recv().is_ok());
-    }
 }
diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs
index 97d4c2f6d1b..86f28c28f69 100644
--- a/src/libstd/vec.rs
+++ b/src/libstd/vec.rs
@@ -2874,7 +2874,6 @@ impl<A> Extendable<A> for ~[A] {
 
 #[cfg(test)]
 mod tests {
-    use option::{None, Some};
     use mem;
     use vec::*;
     use cmp::*;