author     Marvin Löbel <loebel.marvin@gmail.com>    2013-10-11 23:20:34 +0200
committer  Marvin Löbel <loebel.marvin@gmail.com>    2013-10-28 08:50:32 +0100
commit     fa8e71a8257f4226ab532d4bf268d3ecbfa98eb4 (patch)
tree       0b8051814dd8a5ef08e663c172e2b456065d625d /src/libstd/rt
parent     cb5b21eba713ff3888b2741db4c9e7d841cfde02 (diff)
Allow fail messages to be caught, and introduce the Any trait
Some code cleanup, sorting of import blocks

Removed std::unstable::UnsafeArc's use of Either

Added run-fail tests for the new FailWithCause impls

Changed future_result and try to return Result<(), ~Any>.

- Internally, there is an enum of possible fail messages passed around.
- In the case of linked failure or a string message, the ~Any gets
  lazily allocated in future_result's recv method.
- For that, future_result now returns a wrapper around a Port.
- Moved and renamed task::TaskResult to rt::task::UnwindResult
  and made it an internal enum.
- Introduced a replacement typedef `type TaskResult = Result<(), ~Any>`.
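
For readers unfamiliar with the shape of this API, the closest modern analogue to
"future_result and try return Result<(), ~Any>" is std::thread::spawn plus
JoinHandle::join, whose error side is the boxed panic payload. The sketch below
uses today's std, not the 2013 runtime changed in this diff, purely to illustrate
the Result-of-Any shape the commit message describes:

    use std::thread;

    fn main() {
        // join() returns Result<T, Box<dyn Any + Send + 'static>>, the
        // modern counterpart of the Result<(), ~Any> described above.
        let handle = thread::spawn(|| {
            panic!("task failed with a string message");
        });

        match handle.join() {
            Ok(()) => println!("child exited cleanly"),
            Err(payload) => {
                // A panic! with a plain string literal boxes a &'static str.
                match payload.downcast_ref::<&'static str>() {
                    Some(msg) => println!("caught fail message: {}", msg),
                    None => println!("caught a non-string payload"),
                }
            }
        }
    }
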
Diffstat (limited to 'src/libstd/rt')
-rw-r--r--  src/libstd/rt/io/option.rs    2
-rw-r--r--  src/libstd/rt/io/signal.rs    5
-rw-r--r--  src/libstd/rt/kill.rs        34
-rw-r--r--  src/libstd/rt/mod.rs          7
-rw-r--r--  src/libstd/rt/sched.rs        4
-rw-r--r--  src/libstd/rt/task.rs       174
-rw-r--r--  src/libstd/rt/test.rs        47
-rw-r--r--  src/libstd/rt/uv/uvio.rs     19
8 files changed, 192 insertions, 100 deletions
diff --git a/src/libstd/rt/io/option.rs b/src/libstd/rt/io/option.rs
index 52699964b62..234b46458b4 100644
--- a/src/libstd/rt/io/option.rs
+++ b/src/libstd/rt/io/option.rs
@@ -107,7 +107,7 @@ mod test {
     use option::*;
     use super::super::mem::*;
     use rt::test::*;
-    use super::super::{PreviousIoError, io_error, io_error};
+    use super::super::{PreviousIoError, io_error};
 
     #[test]
     fn test_option_writer() {
diff --git a/src/libstd/rt/io/signal.rs b/src/libstd/rt/io/signal.rs
index d2266c8d5d6..4c6c675df03 100644
--- a/src/libstd/rt/io/signal.rs
+++ b/src/libstd/rt/io/signal.rs
@@ -145,10 +145,10 @@ impl Listener {
 
 #[cfg(test)]
 mod test {
+    use super::*;
+
     use libc;
     use rt::io::timer;
-    use rt::io;
-    use super::*;
 
     // kill is only available on Unixes
     #[cfg(unix)]
@@ -206,6 +206,7 @@ mod test {
     #[cfg(windows)]
     #[test]
     fn test_io_signal_invalid_signum() {
+        use rt::io;
         let mut s = Listener::new();
         let mut called = false;
         do io::io_error::cond.trap(|_| {
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index 19f17ca966d..edf6ffb820b 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -152,14 +152,15 @@ There are two known issues with the current scheme for exit code propagation.
 
 use cast;
 use cell::Cell;
-use either::{Either, Left, Right};
 use option::{Option, Some, None};
 use prelude::*;
 use rt::task::Task;
+use rt::task::UnwindReasonLinked;
+use rt::task::{UnwindResult, Failure};
 use task::spawn::Taskgroup;
 use to_bytes::IterBytes;
 use unstable::atomics::{AtomicUint, Relaxed};
-use unstable::sync::{UnsafeArc, LittleLock};
+use unstable::sync::{UnsafeArc, UnsafeArcSelf, UnsafeArcT, LittleLock};
 use util;
 
 static KILLED_MSG: &'static str = "killed by linked failure";
@@ -222,7 +223,7 @@ pub struct Death {
     priv watching_parent: Option<KillHandle>,
     // Action to be done with the exit code. If set, also makes the task wait
     // until all its watched children exit before collecting the status.
-    on_exit:         Option<~fn(bool)>,
+    on_exit:         Option<~fn(UnwindResult)>,
     // nesting level counter for task::unkillable calls (0 == killable).
     priv unkillable:      int,
     // nesting level counter for unstable::atomically calls (0 == can deschedule).
@@ -478,7 +479,7 @@ impl KillHandle {
         match self.try_unwrap() {
             // Couldn't unwrap; children still alive. Reparent entire handle as
             // our own tombstone, to be unwrapped later.
-            Left(this) => {
+            UnsafeArcSelf(this) => {
                 let this = Cell::new(this); // :(
                 do add_lazy_tombstone(parent) |other_tombstones| {
                     let this = Cell::new(this.take()); // :(
@@ -494,14 +495,16 @@ impl KillHandle {
                     }
                 }
             }
+
             // Whether or not all children exited, one or more already failed.
-            Right(KillHandleInner { any_child_failed: true, _ }) => {
+            UnsafeArcT(KillHandleInner { any_child_failed: true, _ }) => {
                 parent.notify_immediate_failure();
             }
+
             // All children exited, but some left behind tombstones that we
             // don't want to wait on now. Give them to our parent.
-            Right(KillHandleInner { any_child_failed: false,
-                                    child_tombstones: Some(f), _ }) => {
+            UnsafeArcT(KillHandleInner { any_child_failed: false,
+                                         child_tombstones: Some(f), _ }) => {
                 let f = Cell::new(f); // :(
                 do add_lazy_tombstone(parent) |other_tombstones| {
                     let f = Cell::new(f.take()); // :(
@@ -513,9 +516,10 @@ impl KillHandle {
                     }
                 }
             }
+
             // All children exited, none failed. Nothing to do!
-            Right(KillHandleInner { any_child_failed: false,
-                                    child_tombstones: None, _ }) => { }
+            UnsafeArcT(KillHandleInner { any_child_failed: false,
+                                         child_tombstones: None, _ }) => { }
         }
 
         // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
@@ -562,7 +566,7 @@ impl Death {
     }
 
     /// Collect failure exit codes from children and propagate them to a parent.
-    pub fn collect_failure(&mut self, mut success: bool, group: Option<Taskgroup>) {
+    pub fn collect_failure(&mut self, result: UnwindResult, group: Option<Taskgroup>) {
         // This may run after the task has already failed, so even though the
         // task appears to need to be killed, the scheduler should not fail us
         // when we block to unwrap.
@@ -576,19 +580,27 @@ impl Death {
         // FIXME(#8192): Doesn't work with "let _ = ..."
         { use util; util::ignore(group); }
 
+        let mut success = result.is_success();
+        let mut result = Cell::new(result);
+
         // Step 1. Decide if we need to collect child failures synchronously.
         do self.on_exit.take().map |on_exit| {
             if success {
                 // We succeeded, but our children might not. Need to wait for them.
                 let mut inner = self.kill_handle.take_unwrap().unwrap();
+
                 if inner.any_child_failed {
                     success = false;
                 } else {
                     // Lockless access to tombstones protected by unwrap barrier.
                     success = inner.child_tombstones.take().map_default(true, |f| f());
                 }
+
+                if !success {
+                    result = Cell::new(Failure(UnwindReasonLinked));
+                }
             }
-            on_exit(success);
+            on_exit(result.take());
         };
 
         // Step 2. Possibly alert possibly-watching parent to failure status.
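
The collect_failure change above is the heart of the kill.rs hunk: a task that
unwound successfully is still reported as failed if any watched child failed, and
that downgrade now carries a reason instead of flipping a bool. A minimal,
self-contained mirror of that decision (the names and the string reason are
illustrative simplifications, not the runtime's actual types):

    #[derive(Debug)]
    enum UnwindResult {
        Success,
        Failure(&'static str), // stands in for Failure(UnwindReason)
    }

    // Simplified: the real method also drains child tombstones before deciding.
    fn collect_failure(own: UnwindResult, any_child_failed: bool) -> UnwindResult {
        match own {
            UnwindResult::Success if any_child_failed => {
                UnwindResult::Failure("linked failure")
            }
            other => other,
        }
    }

    fn main() {
        println!("{:?}", collect_failure(UnwindResult::Success, true));
        println!("{:?}", collect_failure(UnwindResult::Failure("boom"), true));
    }
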
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index d87580c83bf..eaaf8c43281 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -66,12 +66,13 @@ use ptr::RawPtr;
 use rt::local::Local;
 use rt::sched::{Scheduler, Shutdown};
 use rt::sleeper_list::SleeperList;
+use rt::task::UnwindResult;
 use rt::task::{Task, SchedTask, GreenTask, Sched};
 use rt::uv::uvio::UvEventLoop;
 use unstable::atomics::{AtomicInt, AtomicBool, SeqCst};
 use unstable::sync::UnsafeArc;
-use vec;
 use vec::{OwnedVector, MutableVector, ImmutableVector};
+use vec;
 
 use self::thread::Thread;
 use self::work_queue::WorkQueue;
@@ -343,7 +344,7 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int {
     // When the main task exits, after all the tasks in the main
     // task tree, shut down the schedulers and set the exit code.
     let handles = Cell::new(handles);
-    let on_exit: ~fn(bool) = |exit_success| {
+    let on_exit: ~fn(UnwindResult) = |exit_success| {
         unsafe {
             assert!(!(*exited_already.get()).swap(true, SeqCst),
                     "the runtime already exited");
@@ -355,7 +356,7 @@ fn run_(main: ~fn(), use_main_sched: bool) -> int {
         }
 
         unsafe {
-            let exit_code = if exit_success {
+            let exit_code = if exit_success.is_success() {
                 use rt::util;
 
                 // If we're exiting successfully, then return the global
diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs
index 9965380d9dc..d44264befc1 100644
--- a/src/libstd/rt/sched.rs
+++ b/src/libstd/rt/sched.rs
@@ -915,7 +915,6 @@ mod test {
     use rt::test::*;
     use unstable::run_in_bare_thread;
     use borrow::to_uint;
-    use rt::local::*;
     use rt::sched::{Scheduler};
     use cell::Cell;
     use rt::thread::Thread;
@@ -923,6 +922,7 @@ mod test {
     use rt::basic;
     use rt::util;
     use option::{Some};
+    use rt::task::UnwindResult;
 
     #[test]
     fn trivial_run_in_newsched_task_test() {
@@ -1007,7 +1007,7 @@ mod test {
                 assert!(Task::on_appropriate_sched());
             };
 
-            let on_exit: ~fn(bool) = |exit_status| rtassert!(exit_status);
+            let on_exit: ~fn(UnwindResult) = |exit_status| rtassert!(exit_status.is_success());
             task.death.on_exit = Some(on_exit);
 
             sched.bootstrap(task);
diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs
index f82eb929a39..8f695763a25 100644
--- a/src/libstd/rt/task.rs
+++ b/src/libstd/rt/task.rs
@@ -13,29 +13,31 @@
 //! local storage, and logging. Even a 'freestanding' Rust would likely want
 //! to implement this.
 
+use super::local_heap::LocalHeap;
+
+use prelude::*;
+
 use borrow;
 use cast::transmute;
+use cell::Cell;
 use cleanup;
-use local_data;
 use libc::{c_void, uintptr_t, c_char, size_t};
-use prelude::*;
+use local_data;
 use option::{Option, Some, None};
-use rt::borrowck;
 use rt::borrowck::BorrowRecord;
+use rt::borrowck;
+use rt::context::Context;
+use rt::context;
 use rt::env;
 use rt::io::Writer;
 use rt::kill::Death;
 use rt::local::Local;
 use rt::logging::StdErrLogger;
-use super::local_heap::LocalHeap;
 use rt::sched::{Scheduler, SchedHandle};
 use rt::stack::{StackSegment, StackPool};
-use rt::context;
-use rt::context::Context;
-use unstable::finally::Finally;
-use task::spawn::Taskgroup;
-use cell::Cell;
 use send_str::SendStr;
+use task::spawn::Taskgroup;
+use unstable::finally::Finally;
 
 // The Task struct represents all state associated with a rust
 // task. There are at this point two primary "subtypes" of task,
@@ -85,8 +87,61 @@ pub enum SchedHome {
 pub struct GarbageCollector;
 pub struct LocalStorage(Option<local_data::Map>);
 
+/// Represents the reason for the current unwinding process
+pub enum UnwindResult {
+    /// The task is ending successfully
+    Success,
+
+    /// The Task is failing with reason `UnwindReason`
+    Failure(UnwindReason),
+}
+
+impl UnwindResult {
+    /// Returns `true` if this `UnwindResult` is a failure
+    #[inline]
+    pub fn is_failure(&self) -> bool {
+        match *self {
+            Success => false,
+            Failure(_) => true
+        }
+    }
+
+    /// Returns `true` if this `UnwindResult` is a success
+    #[inline]
+    pub fn is_success(&self) -> bool {
+        match *self {
+            Success => true,
+            Failure(_) => false
+        }
+    }
+}
+
+/// Represents the cause of a task failure
+#[deriving(ToStr)]
+pub enum UnwindReason {
+    /// Failed with a string message
+    UnwindReasonStr(SendStr),
+
+    /// Failed with an `~Any`
+    UnwindReasonAny(~Any),
+
+    /// Failed because of linked failure
+    UnwindReasonLinked
+}
+
 pub struct Unwinder {
     unwinding: bool,
+    cause: Option<UnwindReason>
+}
+
+impl Unwinder {
+    fn to_unwind_result(&mut self) -> UnwindResult {
+        if self.unwinding {
+            Failure(self.cause.take().unwrap())
+        } else {
+            Success
+        }
+    }
 }
 
 impl Task {
@@ -135,7 +190,7 @@ impl Task {
             gc: GarbageCollector,
             storage: LocalStorage(None),
             logger: StdErrLogger::new(),
-            unwinder: Unwinder { unwinding: false },
+            unwinder: Unwinder { unwinding: false, cause: None },
             taskgroup: None,
             death: Death::new(),
             destroyed: false,
@@ -170,7 +225,7 @@ impl Task {
             gc: GarbageCollector,
             storage: LocalStorage(None),
             logger: StdErrLogger::new(),
-            unwinder: Unwinder { unwinding: false },
+            unwinder: Unwinder { unwinding: false, cause: None },
             taskgroup: None,
             death: Death::new(),
             destroyed: false,
@@ -193,7 +248,7 @@ impl Task {
             gc: GarbageCollector,
             storage: LocalStorage(None),
             logger: StdErrLogger::new(),
-            unwinder: Unwinder { unwinding: false },
+            unwinder: Unwinder { unwinding: false, cause: None },
             taskgroup: None,
             // FIXME(#7544) make watching optional
             death: self.death.new_child(),
@@ -284,7 +339,7 @@ impl Task {
         // the unkillable counter is set. This is necessary for when the
         // taskgroup destruction code drops references on KillHandles, which
         // might require using unkillable (to synchronize with an unwrapper).
-        self.death.collect_failure(!self.unwinder.unwinding, self.taskgroup.take());
+        self.death.collect_failure(self.unwinder.to_unwind_result(), self.taskgroup.take());
         self.destroyed = true;
     }
 
@@ -469,10 +524,11 @@ impl Unwinder {
         }
     }
 
-    pub fn begin_unwind(&mut self) -> ! {
+    pub fn begin_unwind(&mut self, cause: UnwindReason) -> ! {
         #[fixed_stack_segment]; #[inline(never)];
 
         self.unwinding = true;
+        self.cause = Some(cause);
         unsafe {
             rust_begin_unwind(UNWIND_TOKEN);
             return transmute(());
@@ -561,55 +617,73 @@ pub extern "C" fn rust_stack_exhausted() {
 }
 
 /// This is the entry point of unwinding for things like lang items and such.
-/// The arguments are normally generated by the compiler.
+/// The arguments are normally generated by the compiler, and need to
+/// have static lifetimes.
 pub fn begin_unwind(msg: *c_char, file: *c_char, line: size_t) -> ! {
+    use c_str::CString;
+    use cast::transmute;
+
+    #[inline]
+    fn static_char_ptr(p: *c_char) -> &'static str {
+        let s = unsafe { CString::new(p, false) };
+        match s.as_str() {
+            Some(s) => unsafe { transmute::<&str, &'static str>(s) },
+            None => rtabort!("message wasn't utf8?")
+        }
+    }
+
+    let msg = static_char_ptr(msg);
+    let file = static_char_ptr(file);
+
+    begin_unwind_reason(UnwindReasonStr(msg.into_send_str()), file, line as uint)
+}
+
+/// This is the entry point of unwinding for fail!() and assert!().
+pub fn begin_unwind_reason(reason: UnwindReason, file: &'static str, line: uint) -> ! {
     use rt::in_green_task_context;
     use rt::task::Task;
     use rt::local::Local;
     use str::Str;
-    use c_str::CString;
     use unstable::intrinsics;
 
     unsafe {
-        let msg = CString::new(msg, false);
-        let file = CString::new(file, false);
-        let msg = match msg.as_str() {
-            Some(s) => s, None => rtabort!("message wasn't utf8?")
-        };
+        // Be careful not to allocate in this block, if we're failing we may
+        // have been failing due to a lack of memory in the first place...
 
-        if !in_green_task_context() {
-            match file.as_str() {
-                Some(file) => {
-                    rterrln!("failed in non-task context at '{}', {}:{}",
-                             msg, file, line as int);
-                }
-                None => rterrln!("failed in non-task context at '{}'", msg)
+        let task: *mut Task;
+
+        {
+            let msg = match reason {
+                UnwindReasonStr(ref s) => s.as_slice(),
+                UnwindReasonAny(_)     => "~Any",
+                UnwindReasonLinked     => "linked failure",
+            };
+
+            if !in_green_task_context() {
+                rterrln!("failed in non-task context at '{}', {}:{}",
+                        msg, file, line);
+                intrinsics::abort();
             }
-            intrinsics::abort();
-        }
 
-        // Be careful not to allocate in this block, if we're failing we may
-        // have been failing due to a lack of memory in the first place...
-        let task: *mut Task = Local::unsafe_borrow();
-        let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
-
-        // XXX: this should no get forcibly printed to the console, this should
-        //      either be sent to the parent task (ideally), or get printed to
-        //      the task's logger. Right now the logger is actually a uvio
-        //      instance, which uses unkillable blocks internally for various
-        //      reasons. This will cause serious trouble if the task is failing
-        //      due to mismanagment of its own kill flag, so calling our own
-        //      logger in its current state is a bit of a problem.
-        match file.as_str() {
-            Some(file) => {
-                rterrln!("task '{}' failed at '{}', {}:{}", n, msg, file, line);
+            task = Local::unsafe_borrow();
+            let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
+
+            // XXX: this should no get forcibly printed to the console, this should
+            //      either be sent to the parent task (ideally), or get printed to
+            //      the task's logger. Right now the logger is actually a uvio
+            //      instance, which uses unkillable blocks internally for various
+            //      reasons. This will cause serious trouble if the task is failing
+            //      due to mismanagment of its own kill flag, so calling our own
+            //      logger in its current state is a bit of a problem.
+
+            rterrln!("task '{}' failed at '{}', {}:{}", n, msg, file, line);
+
+            if (*task).unwinder.unwinding {
+                rtabort!("unwinding again");
             }
-            None => rterrln!("task '{}' failed at '{}'", n, msg),
         }
-        if (*task).unwinder.unwinding {
-            rtabort!("unwinding again");
-        }
-        (*task).unwinder.begin_unwind();
+
+        (*task).unwinder.begin_unwind(reason);
     }
 }
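
The UnwindReason variants introduced above distinguish string fail messages from
arbitrary ~Any payloads, and that is the same distinction a caller of the new
Result<(), ~Any> API has to make when inspecting the error. A sketch in modern
Rust using std::panic::catch_unwind (today's std, not the API in this diff), where
the hypothetical ExitCode type stands in for an UnwindReasonAny-style payload:

    use std::panic;

    #[derive(Debug)]
    struct ExitCode(i32); // hypothetical payload, standing in for an ~Any cause

    fn main() {
        let result = panic::catch_unwind(|| {
            panic::panic_any(ExitCode(3));
        });

        if let Err(payload) = result {
            // Mirror of UnwindReasonStr vs UnwindReasonAny: try the string
            // forms first, then fall back to the concrete payload type.
            if let Some(msg) = payload.downcast_ref::<&'static str>() {
                println!("failed with message: {}", msg);
            } else if let Some(msg) = payload.downcast_ref::<String>() {
                println!("failed with message: {}", msg);
            } else if let Some(code) = payload.downcast_ref::<ExitCode>() {
                println!("failed with payload: {:?}", code);
            } else {
                println!("failed with an unknown payload");
            }
        }
    }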
 
diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs
index e4bbfe0a5a3..5f78b9fc959 100644
--- a/src/libstd/rt/test.rs
+++ b/src/libstd/rt/test.rs
@@ -8,30 +8,32 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use rand;
-use rand::Rng;
-use os;
-use libc;
-use option::{Some, None};
-use path::Path;
+use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
+
 use cell::Cell;
 use clone::Clone;
 use container::Container;
 use iter::{Iterator, range};
-use super::io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr};
-use vec::{OwnedVector, MutableVector, ImmutableVector};
+use libc;
+use option::{Some, None};
+use os;
 use path::GenericPath;
+use path::Path;
+use rand::Rng;
+use rand;
+use result::{Result, Ok, Err};
 use rt::basic;
-use rt::sched::Scheduler;
+use rt::comm::oneshot;
 use rt::rtio::EventLoop;
-use unstable::{run_in_bare_thread};
-use rt::thread::Thread;
+use rt::sched::Scheduler;
+use rt::sleeper_list::SleeperList;
 use rt::task::Task;
+use rt::task::UnwindResult;
+use rt::thread::Thread;
 use rt::uv::uvio::UvEventLoop;
 use rt::work_queue::WorkQueue;
-use rt::sleeper_list::SleeperList;
-use rt::comm::oneshot;
-use result::{Result, Ok, Err};
+use unstable::{run_in_bare_thread};
+use vec::{OwnedVector, MutableVector, ImmutableVector};
 
 pub fn new_test_uv_sched() -> Scheduler {
 
@@ -85,9 +87,9 @@ pub fn run_in_uv_task_core(f: ~fn()) {
     let mut sched = ~new_test_uv_sched();
     let exit_handle = Cell::new(sched.make_handle());
 
-    let on_exit: ~fn(bool) = |exit_status| {
+    let on_exit: ~fn(UnwindResult) = |exit_status| {
         exit_handle.take().send(Shutdown);
-        rtassert!(exit_status);
+        rtassert!(exit_status.is_success());
     };
     let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
     task.death.on_exit = Some(on_exit);
@@ -96,15 +98,14 @@ pub fn run_in_uv_task_core(f: ~fn()) {
 }
 
 pub fn run_in_newsched_task_core(f: ~fn()) {
-
     use rt::sched::Shutdown;
 
     let mut sched = ~new_test_sched();
     let exit_handle = Cell::new(sched.make_handle());
 
-    let on_exit: ~fn(bool) = |exit_status| {
+    let on_exit: ~fn(UnwindResult) = |exit_status| {
         exit_handle.take().send(Shutdown);
-        rtassert!(exit_status);
+        rtassert!(exit_status.is_success());
     };
     let mut task = ~Task::new_root(&mut sched.stack_pool, None, f);
     task.death.on_exit = Some(on_exit);
@@ -248,14 +249,14 @@ pub fn run_in_mt_newsched_task(f: ~fn()) {
         }
 
         let handles = Cell::new(handles);
-        let on_exit: ~fn(bool) = |exit_status| {
+        let on_exit: ~fn(UnwindResult) = |exit_status| {
             let mut handles = handles.take();
             // Tell schedulers to exit
             for handle in handles.mut_iter() {
                 handle.send(Shutdown);
             }
 
-            rtassert!(exit_status);
+            rtassert!(exit_status.is_success());
         };
         let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, None, f.take());
         main_task.death.on_exit = Some(on_exit);
@@ -323,7 +324,7 @@ pub fn spawntask_try(f: ~fn()) -> Result<(),()> {
 
     let (port, chan) = oneshot();
     let chan = Cell::new(chan);
-    let on_exit: ~fn(bool) = |exit_status| chan.take().send(exit_status);
+    let on_exit: ~fn(UnwindResult) = |exit_status| chan.take().send(exit_status);
 
     let mut new_task = Task::build_root(None, f);
     new_task.death.on_exit = Some(on_exit);
@@ -331,7 +332,7 @@ pub fn spawntask_try(f: ~fn()) -> Result<(),()> {
     Scheduler::run_task(new_task);
 
     let exit_status = port.recv();
-    if exit_status { Ok(()) } else { Err(()) }
+    if exit_status.is_success() { Ok(()) } else { Err(()) }
 
 }
 
diff --git a/src/libstd/rt/uv/uvio.rs b/src/libstd/rt/uv/uvio.rs
index e0707a86f7b..5643f6445f1 100644
--- a/src/libstd/rt/uv/uvio.rs
+++ b/src/libstd/rt/uv/uvio.rs
@@ -1899,6 +1899,7 @@ fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() {
     use rt::thread::Thread;
     use rt::task::Task;
     use rt::sched::{Shutdown, TaskFromFriend};
+    use rt::task::UnwindResult;
     do run_in_bare_thread {
         let sleepers = SleeperList::new();
         let work_queue1 = WorkQueue::new();
@@ -1916,10 +1917,10 @@ fn test_simple_homed_udp_io_bind_then_move_task_then_home_and_close() {
         let handle2 = Cell::new(sched2.make_handle());
         let tasksFriendHandle = Cell::new(sched2.make_handle());
 
-        let on_exit: ~fn(bool) = |exit_status| {
+        let on_exit: ~fn(UnwindResult) = |exit_status| {
             handle1.take().send(Shutdown);
             handle2.take().send(Shutdown);
-            rtassert!(exit_status);
+            rtassert!(exit_status.is_success());
         };
 
         let test_function: ~fn() = || {
@@ -1978,6 +1979,7 @@ fn test_simple_homed_udp_io_bind_then_move_handle_then_home_and_close() {
     use rt::task::Task;
     use rt::comm::oneshot;
     use rt::sched::Shutdown;
+    use rt::task::UnwindResult;
     do run_in_bare_thread {
         let sleepers = SleeperList::new();
         let work_queue1 = WorkQueue::new();
@@ -2017,10 +2019,10 @@ fn test_simple_homed_udp_io_bind_then_move_handle_then_home_and_close() {
              */
         };
 
-        let on_exit: ~fn(bool) = |exit| {
+        let on_exit: ~fn(UnwindResult) = |exit| {
             handle1.take().send(Shutdown);
             handle2.take().send(Shutdown);
-            rtassert!(exit);
+            rtassert!(exit.is_success());
         };
 
         let task1 = Cell::new(~Task::new_root(&mut sched1.stack_pool, None, body1));
@@ -2088,6 +2090,7 @@ fn test_simple_tcp_server_and_client_on_diff_threads() {
     use rt::thread::Thread;
     use rt::task::Task;
     use rt::sched::{Shutdown};
+    use rt::task::UnwindResult;
     do run_in_bare_thread {
         let sleepers = SleeperList::new();
 
@@ -2108,14 +2111,14 @@ fn test_simple_tcp_server_and_client_on_diff_threads() {
         let server_handle = Cell::new(server_sched.make_handle());
         let client_handle = Cell::new(client_sched.make_handle());
 
-        let server_on_exit: ~fn(bool) = |exit_status| {
+        let server_on_exit: ~fn(UnwindResult) = |exit_status| {
             server_handle.take().send(Shutdown);
-            rtassert!(exit_status);
+            rtassert!(exit_status.is_success());
         };
 
-        let client_on_exit: ~fn(bool) = |exit_status| {
+        let client_on_exit: ~fn(UnwindResult) = |exit_status| {
             client_handle.take().send(Shutdown);
-            rtassert!(exit_status);
+            rtassert!(exit_status.is_success());
         };
 
         let server_fn: ~fn() = || {