about summary refs log tree commit diff
path: root/src/libstd/rt/kill.rs
diff options
context:
space:
mode:
Diffstat (limited to 'src/libstd/rt/kill.rs')
-rw-r--r--src/libstd/rt/kill.rs743
1 files changed, 30 insertions, 713 deletions
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index fa4746a6bb7..ac13b12c9ad 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -155,176 +155,59 @@ use cell::Cell;
 use option::{Option, Some, None};
 use prelude::*;
 use rt::task::Task;
-use rt::task::{UnwindResult, Failure};
-use task::spawn::Taskgroup;
-use task::LinkedFailure;
-use to_bytes::IterBytes;
-use unstable::atomics::{AtomicUint, Relaxed};
-use unstable::sync::{UnsafeArc, UnsafeArcSelf, UnsafeArcT, LittleLock};
-use util;
-
-static KILLED_MSG: &'static str = "killed by linked failure";
-
-// State values for the 'killed' and 'unkillable' atomic flags below.
-static KILL_RUNNING:    uint = 0;
-static KILL_KILLED:     uint = 1;
-static KILL_UNKILLABLE: uint = 2;
-
-struct KillFlag(AtomicUint);
-type KillFlagHandle = UnsafeArc<KillFlag>;
+use rt::task::UnwindResult;
+use unstable::atomics::{AtomicUint, SeqCst};
+use unstable::sync::UnsafeArc;
 
 /// A handle to a blocked task. Usually this means having the ~Task pointer by
 /// ownership, but if the task is killable, a killer can steal it at any time.
 pub enum BlockedTask {
-    Unkillable(~Task),
-    Killable(KillFlagHandle),
-}
-
-// FIXME(#7544)(bblum): think about the cache efficiency of this
-struct KillHandleInner {
-    // Is the task running, blocked, or killed? Possible values:
-    // * KILL_RUNNING    - Not unkillable, no kill pending.
-    // * KILL_KILLED     - Kill pending.
-    // * <ptr>           - A transmuted blocked ~Task pointer.
-    // This flag is refcounted because it may also be referenced by a blocking
-    // concurrency primitive, used to wake the task normally, whose reference
-    // may outlive the handle's if the task is killed.
-    killed: KillFlagHandle,
-    // Has the task deferred kill signals? This flag guards the above one.
-    // Possible values:
-    // * KILL_RUNNING    - Not unkillable, no kill pending.
-    // * KILL_KILLED     - Kill pending.
-    // * KILL_UNKILLABLE - Kill signals deferred.
-    unkillable: AtomicUint,
-
-    // Shared state between task and children for exit code propagation. These
-    // are here so we can re-use the kill handle to implement watched children
-    // tasks. Using a separate Arc-like would introduce extra atomic adds/subs
-    // into common spawn paths, so this is just for speed.
-
-    // Locklessly accessed; protected by the enclosing refcount's barriers.
-    any_child_failed: bool,
-    // A lazy list, consuming which may unwrap() many child tombstones.
-    child_tombstones: Option<proc() -> bool>,
-    // Protects multiple children simultaneously creating tombstones.
-    graveyard_lock: LittleLock,
+    Owned(~Task),
+    Shared(UnsafeArc<AtomicUint>),
 }
 
-/// State shared between tasks used for task killing during linked failure.
-#[deriving(Clone)]
-pub struct KillHandle(UnsafeArc<KillHandleInner>);
-
 /// Per-task state related to task death, killing, failure, etc.
 pub struct Death {
-    // Shared among this task, its watched children, and any linked tasks who
-    // might kill it. This is optional so we can take it by-value at exit time.
-    kill_handle:     Option<KillHandle>,
-    // Handle to a watching parent, if we have one, for exit code propagation.
-    priv watching_parent: Option<KillHandle>,
     // Action to be done with the exit code. If set, also makes the task wait
     // until all its watched children exit before collecting the status.
     on_exit:         Option<proc(UnwindResult)>,
-    // nesting level counter for task::unkillable calls (0 == killable).
-    priv unkillable:      int,
     // nesting level counter for unstable::atomically calls (0 == can deschedule).
     priv wont_sleep:      int,
-    // A "spare" handle to the kill flag inside the kill handle. Used during
-    // blocking/waking as an optimization to avoid two xadds on the refcount.
-    priv spare_kill_flag: Option<KillFlagHandle>,
-}
-
-impl Drop for KillFlag {
-    // Letting a KillFlag with a task inside get dropped would leak the task.
-    // We could free it here, but the task should get awoken by hand somehow.
-    fn drop(&mut self) {
-        match self.load(Relaxed) {
-            KILL_RUNNING | KILL_KILLED => { },
-            _ => rtabort!("can't drop kill flag with a blocked task inside!"),
-        }
-    }
-}
-
-// Whenever a task blocks, it swaps out its spare kill flag to use as the
-// blocked task handle. So unblocking a task must restore that spare.
-unsafe fn revive_task_ptr(task_ptr: uint, spare_flag: Option<KillFlagHandle>) -> ~Task {
-    let mut task: ~Task = cast::transmute(task_ptr);
-    if task.death.spare_kill_flag.is_none() {
-        task.death.spare_kill_flag = spare_flag;
-    } else {
-        // A task's spare kill flag is not used for blocking in one case:
-        // when an unkillable task blocks on select. In this case, a separate
-        // one was created, which we now discard.
-        rtassert!(task.death.unkillable > 0);
-    }
-    task
 }
 
 impl BlockedTask {
     /// Returns Some if the task was successfully woken; None if already killed.
     pub fn wake(self) -> Option<~Task> {
         match self {
-            Unkillable(task) => Some(task),
-            Killable(flag_arc) => {
-                let flag = unsafe { &mut **flag_arc.get() };
-                match flag.swap(KILL_RUNNING, Relaxed) {
-                    KILL_RUNNING => None, // woken from select(), perhaps
-                    KILL_KILLED  => None, // a killer stole it already
-                    task_ptr     =>
-                        Some(unsafe { revive_task_ptr(task_ptr, Some(flag_arc)) })
+            Owned(task) => Some(task),
+            Shared(arc) => unsafe {
+                match (*arc.get()).swap(0, SeqCst) {
+                    0 => None,
+                    n => cast::transmute(n),
                 }
             }
         }
     }
 
     /// Create a blocked task, unless the task was already killed.
-    pub fn try_block(mut task: ~Task) -> Either<~Task, BlockedTask> {
-        // NB: As an optimization, we could give a free pass to being unkillable
-        // to tasks whose taskgroups haven't been initialized yet, but that
-        // introduces complications with select() and with the test cases below,
-        // and it's not clear the uncommon performance boost is worth it.
-        if task.death.unkillable > 0 {
-            Right(Unkillable(task))
-        } else {
-            rtassert!(task.death.kill_handle.is_some());
-            unsafe {
-                // The inverse of 'revive', above, occurs here.
-                // The spare kill flag will usually be Some, unless the task was
-                // already killed, in which case the killer will have deferred
-                // creating a new one until whenever it blocks during unwinding.
-                let flag_arc = match task.death.spare_kill_flag.take() {
-                    Some(spare_flag) => spare_flag,
-                    None => {
-                        // A task that kills us won't have a spare kill flag to
-                        // give back to us, so we restore it ourselves here. This
-                        // situation should only arise when we're already failing.
-                        rtassert!(task.unwinder.unwinding);
-                        (*task.death.kill_handle.get_ref().get()).killed.clone()
-                    }
-                };
-                let flag     = &mut **flag_arc.get();
-                let task_ptr = cast::transmute(task);
-                // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
-                match flag.compare_and_swap(KILL_RUNNING, task_ptr, Relaxed) {
-                    KILL_RUNNING => Right(Killable(flag_arc)),
-                    KILL_KILLED  => Left(revive_task_ptr(task_ptr, Some(flag_arc))),
-                    x            => rtabort!("can't block task! kill flag = {}", x),
-                }
-            }
-        }
+    pub fn block(task: ~Task) -> BlockedTask {
+        Owned(task)
     }
 
     /// Converts one blocked task handle to a list of many handles to the same.
     pub fn make_selectable(self, num_handles: uint) -> ~[BlockedTask] {
         let handles = match self {
-            Unkillable(task) => {
-                let flag = unsafe { KillFlag(AtomicUint::new(cast::transmute(task))) };
+            Owned(task) => {
+                let flag = unsafe {
+                    AtomicUint::new(cast::transmute(task))
+                };
                 UnsafeArc::newN(flag, num_handles)
             }
-            Killable(flag_arc) => flag_arc.cloneN(num_handles),
+            Shared(arc) => arc.cloneN(num_handles),
         };
         // Even if the task was unkillable before, we use 'Killable' because
         // multiple pipes will have handles. It does not really mean killable.
-        handles.move_iter().map(|x| Killable(x)).collect()
+        handles.move_iter().map(|x| Shared(x)).collect()
     }
 
     // This assertion has two flavours because the wake involves an atomic op.
@@ -337,16 +220,14 @@ impl BlockedTask {
     /// Convert to an unsafe uint value. Useful for storing in a pipe's state flag.
     #[inline]
     pub unsafe fn cast_to_uint(self) -> uint {
-        // Use the low bit to distinguish the enum variants, to save a second
-        // allocation in the indestructible case.
         match self {
-            Unkillable(task) => {
+            Owned(task) => {
                 let blocked_task_ptr: uint = cast::transmute(task);
                 rtassert!(blocked_task_ptr & 0x1 == 0);
                 blocked_task_ptr
-            },
-            Killable(flag_arc) => {
-                let blocked_task_ptr: uint = cast::transmute(~flag_arc);
+            }
+            Shared(arc) => {
+                let blocked_task_ptr: uint = cast::transmute(~arc);
                 rtassert!(blocked_task_ptr & 0x1 == 0);
                 blocked_task_ptr | 0x1
             }
@@ -357,318 +238,29 @@ impl BlockedTask {
     #[inline]
     pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
         if blocked_task_ptr & 0x1 == 0 {
-            Unkillable(cast::transmute(blocked_task_ptr))
-        } else {
-            let ptr: ~KillFlagHandle = cast::transmute(blocked_task_ptr & !0x1);
-            match ptr {
-                ~flag_arc => Killable(flag_arc)
-            }
-        }
-    }
-}
-
-// So that KillHandle can be hashed in the taskgroup bookkeeping code.
-impl IterBytes for KillHandle {
-    fn iter_bytes(&self, lsb0: bool, f: |buf: &[u8]| -> bool) -> bool {
-        self.data.iter_bytes(lsb0, f)
-    }
-}
-impl Eq for KillHandle {
-    #[inline] fn eq(&self, other: &KillHandle) -> bool { self.data.eq(&other.data) }
-    #[inline] fn ne(&self, other: &KillHandle) -> bool { self.data.ne(&other.data) }
-}
-
-impl KillHandle {
-    pub fn new() -> (KillHandle, KillFlagHandle) {
-        let (flag, flag_clone) =
-            UnsafeArc::new2(KillFlag(AtomicUint::new(KILL_RUNNING)));
-        let handle = KillHandle(UnsafeArc::new(KillHandleInner {
-            // Linked failure fields
-            killed:     flag,
-            unkillable: AtomicUint::new(KILL_RUNNING),
-            // Exit code propagation fields
-            any_child_failed: false,
-            child_tombstones: None,
-            graveyard_lock:   LittleLock::new(),
-        }));
-        (handle, flag_clone)
-    }
-
-    // Will begin unwinding if a kill signal was received, unless already_failing.
-    // This can't be used recursively, because a task which sees a KILLED
-    // signal must fail immediately, which an already-unkillable task can't do.
-    #[inline]
-    pub fn inhibit_kill(&mut self, already_failing: bool) {
-        let inner = unsafe { &mut *self.get() };
-        // Expect flag to contain RUNNING. If KILLED, it should stay KILLED.
-        // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-        match inner.unkillable.compare_and_swap(KILL_RUNNING, KILL_UNKILLABLE, Relaxed) {
-            KILL_RUNNING    => { }, // normal case
-            KILL_KILLED     => if !already_failing { fail!("{}", KILLED_MSG) },
-            _               => rtabort!("inhibit_kill: task already unkillable"),
-        }
-    }
-
-    // Will begin unwinding if a kill signal was received, unless already_failing.
-    #[inline]
-    pub fn allow_kill(&mut self, already_failing: bool) {
-        let inner = unsafe { &mut *self.get() };
-        // Expect flag to contain UNKILLABLE. If KILLED, it should stay KILLED.
-        // FIXME(#7544)(bblum): is it really necessary to prohibit double kill?
-        match inner.unkillable.compare_and_swap(KILL_UNKILLABLE, KILL_RUNNING, Relaxed) {
-            KILL_UNKILLABLE => { }, // normal case
-            KILL_KILLED     => if !already_failing { fail!("{}", KILLED_MSG) },
-            _               => rtabort!("allow_kill: task already killable"),
-        }
-    }
-
-    // Send a kill signal to the handle's owning task. Returns the task itself
-    // if it was blocked and needs punted awake. To be called by other tasks.
-    pub fn kill(&mut self) -> Option<~Task> {
-        let inner = unsafe { &mut *self.get() };
-        if inner.unkillable.swap(KILL_KILLED, Relaxed) == KILL_RUNNING {
-            // Got in. Allowed to try to punt the task awake.
-            let flag = unsafe { &mut *inner.killed.get() };
-            match flag.swap(KILL_KILLED, Relaxed) {
-                // Task either not blocked or already taken care of.
-                KILL_RUNNING | KILL_KILLED => None,
-                // Got ownership of the blocked task.
-                // While the usual 'wake' path can just pass back the flag
-                // handle, we (the slower kill path) haven't an extra one lying
-                // around. The task will wake up without a spare.
-                task_ptr => Some(unsafe { revive_task_ptr(task_ptr, None) }),
-            }
+            Owned(cast::transmute(blocked_task_ptr))
         } else {
-            // Otherwise it was either unkillable or already killed. Somebody
-            // else was here first who will deal with the kill signal.
-            None
-        }
-    }
-
-    #[inline]
-    pub fn killed(&self) -> bool {
-        // Called every context switch, so shouldn't report true if the task
-        // is unkillable with a kill signal pending.
-        let inner = unsafe { &*self.get() };
-        let flag  = unsafe { &*inner.killed.get() };
-        // A barrier-related concern here is that a task that gets killed
-        // awake needs to see the killer's write of KILLED to this flag. This
-        // is analogous to receiving a pipe payload; the appropriate barrier
-        // should happen when enqueueing the task.
-        flag.load(Relaxed) == KILL_KILLED
-    }
-
-    pub fn notify_immediate_failure(&mut self) {
-        // A benign data race may happen here if there are failing sibling
-        // tasks that were also spawned-watched. The refcount's write barriers
-        // in UnsafeArc ensure that this write will be seen by the
-        // unwrapper/destructor, whichever task may unwrap it.
-        unsafe { (*self.get()).any_child_failed = true; }
-    }
-
-    // For use when a task does not need to collect its children's exit
-    // statuses, but the task has a parent which might want them.
-    pub fn reparent_children_to(self, parent: &mut KillHandle) {
-        // Optimistic path: If another child of the parent's already failed,
-        // we don't need to worry about any of this.
-        if unsafe { (*parent.get()).any_child_failed } {
-            return;
-        }
-
-        // Try to see if all our children are gone already.
-        match self.try_unwrap() {
-            // Couldn't unwrap; children still alive. Reparent entire handle as
-            // our own tombstone, to be unwrapped later.
-            UnsafeArcSelf(this) => {
-                let this = Cell::new(this); // :(
-                do add_lazy_tombstone(parent) |other_tombstones| {
-                    let this = Cell::new(this.take()); // :(
-                    let others = Cell::new(other_tombstones); // :(
-                    || {
-                        // Prefer to check tombstones that were there first,
-                        // being "more fair" at the expense of tail-recursion.
-                        others.take().map_default(true, |f| f()) && {
-                            let mut inner = this.take().unwrap();
-                            (!inner.any_child_failed) &&
-                                inner.child_tombstones.take().map_default(true, |f| f())
-                        }
-                    }
-                }
-            }
-
-            // Whether or not all children exited, one or more already failed.
-            UnsafeArcT(KillHandleInner { any_child_failed: true, _ }) => {
-                parent.notify_immediate_failure();
-            }
-
-            // All children exited, but some left behind tombstones that we
-            // don't want to wait on now. Give them to our parent.
-            UnsafeArcT(KillHandleInner { any_child_failed: false,
-                                         child_tombstones: Some(f), _ }) => {
-                let f = Cell::new(f); // :(
-                do add_lazy_tombstone(parent) |other_tombstones| {
-                    let f = Cell::new(f.take()); // :(
-                    let others = Cell::new(other_tombstones); // :(
-                    || {
-                        // Prefer fairness to tail-recursion, as in above case.
-                        others.take().map_default(true, |f| f()) &&
-                            f.take()()
-                    }
-                }
-            }
-
-            // All children exited, none failed. Nothing to do!
-            UnsafeArcT(KillHandleInner { any_child_failed: false,
-                                         child_tombstones: None, _ }) => { }
-        }
-
-        // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
-        #[inline]
-        fn add_lazy_tombstone(parent: &mut KillHandle,
-                              blk: |Option<proc() -> bool>| -> proc() -> bool)
-                              {
-            let inner: &mut KillHandleInner = unsafe { &mut *parent.get() };
-            unsafe {
-                do inner.graveyard_lock.lock {
-                    // Update the current "head node" of the lazy list.
-                    inner.child_tombstones =
-                        Some(blk(util::replace(&mut inner.child_tombstones, None)));
-                }
-            }
+            let ptr: ~UnsafeArc<AtomicUint> = cast::transmute(blocked_task_ptr & !1);
+            Shared(*ptr)
         }
     }
 }
 
 impl Death {
     pub fn new() -> Death {
-        let (handle, spare) = KillHandle::new();
-        Death {
-            kill_handle:     Some(handle),
-            watching_parent: None,
-            on_exit:         None,
-            unkillable:      0,
-            wont_sleep:      0,
-            spare_kill_flag: Some(spare),
-        }
-    }
-
-    pub fn new_child(&self) -> Death {
-        // FIXME(#7327)
-        let (handle, spare) = KillHandle::new();
         Death {
-            kill_handle:     Some(handle),
-            watching_parent: self.kill_handle.clone(),
             on_exit:         None,
-            unkillable:      0,
             wont_sleep:      0,
-            spare_kill_flag: Some(spare),
         }
     }
 
     /// Collect failure exit codes from children and propagate them to a parent.
-    pub fn collect_failure(&mut self, result: UnwindResult, group: Option<Taskgroup>) {
-        // This may run after the task has already failed, so even though the
-        // task appears to need to be killed, the scheduler should not fail us
-        // when we block to unwrap.
-        // (XXX: Another less-elegant reason for doing this is so that the use
-        // of the LittleLock in reparent_children_to doesn't need to access the
-        // unkillable flag in the kill_handle, since we'll have removed it.)
-        rtassert!(self.unkillable == 0);
-        self.unkillable = 1;
-
-        // NB. See corresponding comment at the callsite in task.rs.
-        // FIXME(#8192): Doesn't work with "let _ = ..."
-        { use util; util::ignore(group); }
-
-        let mut success = result.is_success();
-        let mut result = Cell::new(result);
-
-        // Step 1. Decide if we need to collect child failures synchronously.
-        do self.on_exit.take().map |on_exit| {
-            if success {
-                // We succeeded, but our children might not. Need to wait for them.
-                let mut inner = self.kill_handle.take_unwrap().unwrap();
-
-                if inner.any_child_failed {
-                    success = false;
-                } else {
-                    // Lockless access to tombstones protected by unwrap barrier.
-                    success = inner.child_tombstones.take().map_default(true, |f| f());
-                }
+    pub fn collect_failure(&mut self, result: UnwindResult) {
+        let result = Cell::new(result);
 
-                if !success {
-                    result = Cell::new(Failure(~LinkedFailure as ~Any));
-                }
-            }
+        do self.on_exit.take().map |on_exit| {
             on_exit(result.take());
         };
-
-        // Step 2. Possibly alert possibly-watching parent to failure status.
-        // Note that as soon as parent_handle goes out of scope, the parent
-        // can successfully unwrap its handle and collect our reported status.
-        do self.watching_parent.take().map |mut parent_handle| {
-            if success {
-                // Our handle might be None if we had an exit callback, and
-                // already unwrapped it. But 'success' being true means no
-                // child failed, so there's nothing to do (see below case).
-                do self.kill_handle.take().map |own_handle| {
-                    own_handle.reparent_children_to(&mut parent_handle);
-                };
-            } else {
-                // Can inform watching parent immediately that we failed.
-                // (Note the importance of non-failing tasks NOT writing
-                // 'false', which could obscure another task's failure.)
-                parent_handle.notify_immediate_failure();
-            }
-        };
-
-        // Can't use allow_kill directly; that would require the kill handle.
-        rtassert!(self.unkillable == 1);
-        self.unkillable = 0;
-    }
-
-    /// Fails if a kill signal was received.
-    #[inline]
-    pub fn check_killed(&self, already_failing: bool) {
-        match self.kill_handle {
-            Some(ref kill_handle) =>
-                // The task may be both unkillable and killed if it does some
-                // synchronization during unwinding or cleanup (for example,
-                // sending on a notify port). In that case failing won't help.
-                if self.unkillable == 0 && (!already_failing) && kill_handle.killed() {
-                    fail!("{}", KILLED_MSG);
-                },
-            // This may happen during task death (see comments in collect_failure).
-            None => rtassert!(self.unkillable > 0),
-        }
-    }
-
-    /// Enter a possibly-nested unkillable section of code.
-    /// All calls must be paired with a subsequent call to allow_kill.
-    #[inline]
-    pub fn inhibit_kill(&mut self, already_failing: bool) {
-        self.unkillable += 1;
-        // May fail, hence must happen *after* incrementing the counter
-        if self.unkillable == 1 {
-            rtassert!(self.kill_handle.is_some());
-            self.kill_handle.get_mut_ref().inhibit_kill(already_failing);
-        }
-    }
-
-    /// Exit a possibly-nested unkillable section of code.
-    /// All calls must be paired with a preceding call to inhibit_kill.
-    #[inline]
-    pub fn allow_kill(&mut self, already_failing: bool) {
-        if self.unkillable == 0 {
-            // we need to decrement the counter before failing.
-            self.unkillable -= 1;
-            fail!("Cannot enter a rekillable() block without a surrounding unkillable()");
-        }
-        self.unkillable -= 1;
-        if self.unkillable == 0 {
-            rtassert!(self.kill_handle.is_some());
-            self.kill_handle.get_mut_ref().allow_kill(already_failing);
-        }
     }
 
     /// Enter a possibly-nested "atomic" section of code. Just for assertions.
@@ -699,296 +291,21 @@ impl Death {
 impl Drop for Death {
     fn drop(&mut self) {
         // Mustn't be in an atomic or unkillable section at task death.
-        rtassert!(self.unkillable == 0);
         rtassert!(self.wont_sleep == 0);
     }
 }
 
 #[cfg(test)]
 mod test {
-    #[allow(unused_mut)];
-    use cell::Cell;
     use rt::test::*;
     use super::*;
-    use util;
-
-    // Test cases don't care about the spare killed flag.
-    fn make_kill_handle() -> KillHandle { let (h,_) = KillHandle::new(); h }
-
-    #[ignore(reason = "linked failure")]
-    #[test]
-    fn no_tombstone_success() {
-        do run_in_newsched_task {
-            // Tests case 4 of the 4-way match in reparent_children.
-            let mut parent = make_kill_handle();
-            let mut child  = make_kill_handle();
-
-            // Without another handle to child, the try unwrap should succeed.
-            child.reparent_children_to(&mut parent);
-            let mut parent_inner = parent.unwrap();
-            assert!(parent_inner.child_tombstones.is_none());
-            assert!(parent_inner.any_child_failed == false);
-        }
-    }
-    #[test]
-    fn no_tombstone_failure() {
-        do run_in_newsched_task {
-            // Tests case 2 of the 4-way match in reparent_children.
-            let mut parent = make_kill_handle();
-            let mut child  = make_kill_handle();
-
-            child.notify_immediate_failure();
-            // Without another handle to child, the try unwrap should succeed.
-            child.reparent_children_to(&mut parent);
-            let mut parent_inner = parent.unwrap();
-            assert!(parent_inner.child_tombstones.is_none());
-            // Immediate failure should have been propagated.
-            assert!(parent_inner.any_child_failed);
-        }
-    }
-    #[test]
-    fn no_tombstone_because_sibling_already_failed() {
-        do run_in_newsched_task {
-            // Tests "case 0", the optimistic path in reparent_children.
-            let mut parent = make_kill_handle();
-            let mut child1 = make_kill_handle();
-            let mut child2 = make_kill_handle();
-            let mut link   = child2.clone();
-
-            // Should set parent's child_failed flag
-            child1.notify_immediate_failure();
-            child1.reparent_children_to(&mut parent);
-            // Should bypass trying to unwrap child2 entirely.
-            // Otherwise, due to 'link', it would try to tombstone.
-            child2.reparent_children_to(&mut parent);
-            // Should successfully unwrap even though 'link' is still alive.
-            let mut parent_inner = parent.unwrap();
-            assert!(parent_inner.child_tombstones.is_none());
-            // Immediate failure should have been propagated by first child.
-            assert!(parent_inner.any_child_failed);
-            util::ignore(link);
-        }
-    }
-    #[test]
-    fn one_tombstone_success() {
-        do run_in_newsched_task {
-            let mut parent = make_kill_handle();
-            let mut child  = make_kill_handle();
-            let mut link   = child.clone();
-
-            // Creates 1 tombstone. Existence of 'link' makes try-unwrap fail.
-            child.reparent_children_to(&mut parent);
-            // Let parent collect tombstones.
-            util::ignore(link);
-            // Must have created a tombstone
-            let mut parent_inner = parent.unwrap();
-            assert!(parent_inner.child_tombstones.take_unwrap()());
-            assert!(parent_inner.any_child_failed == false);
-        }
-    }
-    #[test]
-    fn one_tombstone_failure() {
-        do run_in_newsched_task {
-            let mut parent = make_kill_handle();
-            let mut child  = make_kill_handle();
-            let mut link   = child.clone();
-
-            // Creates 1 tombstone. Existence of 'link' makes try-unwrap fail.
-            child.reparent_children_to(&mut parent);
-            // Must happen after tombstone to not be immediately propagated.
-            link.notify_immediate_failure();
-            // Let parent collect tombstones.
-            util::ignore(link);
-            // Must have created a tombstone
-            let mut parent_inner = parent.unwrap();
-            // Failure must be seen in the tombstone.
-            assert!(parent_inner.child_tombstones.take_unwrap()() == false);
-            assert!(parent_inner.any_child_failed == false);
-        }
-    }
-    #[test]
-    fn two_tombstones_success() {
-        do run_in_newsched_task {
-            let mut parent = make_kill_handle();
-            let mut middle = make_kill_handle();
-            let mut child  = make_kill_handle();
-            let mut link   = child.clone();
-
-            child.reparent_children_to(&mut middle); // case 1 tombstone
-            // 'middle' should try-unwrap okay, but still have to reparent.
-            middle.reparent_children_to(&mut parent); // case 3 tombstone
-            // Let parent collect tombstones.
-            util::ignore(link);
-            // Must have created a tombstone
-            let mut parent_inner = parent.unwrap();
-            assert!(parent_inner.child_tombstones.take_unwrap()());
-            assert!(parent_inner.any_child_failed == false);
-        }
-    }
-    #[test]
-    fn two_tombstones_failure() {
-        do run_in_newsched_task {
-            let mut parent = make_kill_handle();
-            let mut middle = make_kill_handle();
-            let mut child  = make_kill_handle();
-            let mut link   = child.clone();
-
-            child.reparent_children_to(&mut middle); // case 1 tombstone
-            // Must happen after tombstone to not be immediately propagated.
-            link.notify_immediate_failure();
-            // 'middle' should try-unwrap okay, but still have to reparent.
-            middle.reparent_children_to(&mut parent); // case 3 tombstone
-            // Let parent collect tombstones.
-            util::ignore(link);
-            // Must have created a tombstone
-            let mut parent_inner = parent.unwrap();
-            // Failure must be seen in the tombstone.
-            assert!(parent_inner.child_tombstones.take_unwrap()() == false);
-            assert!(parent_inner.any_child_failed == false);
-        }
-    }
-
-    // Task killing tests
-
-    #[test]
-    fn kill_basic() {
-        do run_in_newsched_task {
-            let mut handle = make_kill_handle();
-            assert!(!handle.killed());
-            assert!(handle.kill().is_none());
-            assert!(handle.killed());
-        }
-    }
-
-    #[test]
-    fn double_kill() {
-        do run_in_newsched_task {
-            let mut handle = make_kill_handle();
-            assert!(!handle.killed());
-            assert!(handle.kill().is_none());
-            assert!(handle.killed());
-            assert!(handle.kill().is_none());
-            assert!(handle.killed());
-        }
-    }
-
-    #[test]
-    fn unkillable_after_kill() {
-        do run_in_newsched_task {
-            let mut handle = make_kill_handle();
-            assert!(handle.kill().is_none());
-            assert!(handle.killed());
-            let handle_cell = Cell::new(handle);
-            let result = do spawntask_try {
-                handle_cell.take().inhibit_kill(false);
-            };
-            assert!(result.is_err());
-        }
-    }
-
-    #[test]
-    fn unkillable_during_kill() {
-        do run_in_newsched_task {
-            let mut handle = make_kill_handle();
-            handle.inhibit_kill(false);
-            assert!(handle.kill().is_none());
-            assert!(!handle.killed());
-            let handle_cell = Cell::new(handle);
-            let result = do spawntask_try {
-                handle_cell.take().allow_kill(false);
-            };
-            assert!(result.is_err());
-        }
-    }
-
-    #[test]
-    fn unkillable_before_kill() {
-        do run_in_newsched_task {
-            let mut handle = make_kill_handle();
-            handle.inhibit_kill(false);
-            handle.allow_kill(false);
-            assert!(handle.kill().is_none());
-            assert!(handle.killed());
-        }
-    }
 
     // Task blocking tests
 
     #[test]
     fn block_and_wake() {
-        do with_test_task |mut task| {
-            BlockedTask::try_block(task).unwrap_right().wake().unwrap()
-        }
-    }
-
-    #[ignore(reason = "linked failure")]
-    #[test]
-    fn block_and_get_killed() {
-        do with_test_task |mut task| {
-            let mut handle = task.death.kill_handle.get_ref().clone();
-            let result = BlockedTask::try_block(task).unwrap_right();
-            let task = handle.kill().unwrap();
-            assert!(result.wake().is_none());
-            task
-        }
-    }
-
-    #[ignore(reason = "linked failure")]
-    #[test]
-    fn block_already_killed() {
-        do with_test_task |mut task| {
-            let mut handle = task.death.kill_handle.get_ref().clone();
-            assert!(handle.kill().is_none());
-            BlockedTask::try_block(task).unwrap_left()
-        }
-    }
-
-    #[ignore(reason = "linked failure")]
-    #[test]
-    fn block_unkillably_and_get_killed() {
-        do with_test_task |mut task| {
-            let mut handle = task.death.kill_handle.get_ref().clone();
-            task.death.inhibit_kill(false);
-            let result = BlockedTask::try_block(task).unwrap_right();
-            assert!(handle.kill().is_none());
-            let mut task = result.wake().unwrap();
-            // This call wants to fail, but we can't have that happen since
-            // we're not running in a newsched task, so we can't even use
-            // spawntask_try. But the failing behaviour is already tested
-            // above, in unkillable_during_kill(), so we punt on it here.
-            task.death.allow_kill(true);
-            task
-        }
-    }
-
-    #[ignore(reason = "linked failure")]
-    #[test]
-    fn block_on_pipe() {
-        // Tests the "killable" path of casting to/from uint.
-        do run_in_newsched_task {
-            do with_test_task |mut task| {
-                let result = BlockedTask::try_block(task).unwrap_right();
-                let result = unsafe { result.cast_to_uint() };
-                let result = unsafe { BlockedTask::cast_from_uint(result) };
-                result.wake().unwrap()
-            }
-        }
-    }
-
-    #[ignore(reason = "linked failure")]
-    #[test]
-    fn block_unkillably_on_pipe() {
-        // Tests the "indestructible" path of casting to/from uint.
-        do run_in_newsched_task {
-            do with_test_task |mut task| {
-                task.death.inhibit_kill(false);
-                let result = BlockedTask::try_block(task).unwrap_right();
-                let result = unsafe { result.cast_to_uint() };
-                let result = unsafe { BlockedTask::cast_from_uint(result) };
-                let mut task = result.wake().unwrap();
-                task.death.allow_kill(false);
-                task
-            }
+        do with_test_task |task| {
+            BlockedTask::block(task).wake().unwrap()
         }
     }
 }