author     Ben Blum <bblum@andrew.cmu.edu>    2013-07-08 19:31:32 -0400
committer  Ben Blum <bblum@andrew.cmu.edu>    2013-07-20 05:08:56 -0400
commit     e80efe3fda506877b3fb7ff0df5d97dffb6a906f (patch)
tree       bd84dc36ca507ed8d3d0e11b0044a8066f302a36 /src/libstd/rt
parent     629f6e8d68be06bf07f803db64be6a917a66b2cf (diff)
Do a task-killed check at the start of task 'timeslices'.
Diffstat (limited to 'src/libstd/rt')
-rw-r--r--  src/libstd/rt/kill.rs    28
-rw-r--r--  src/libstd/rt/sched.rs   11
2 files changed, 38 insertions(+), 1 deletion(-)
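
The mechanism this commit adds can be summarized as: each task carries a shared atomic kill flag, and the scheduler polls it at every "timeslice" boundary (each context switch back into the task), failing the task if a kill was delivered in the meantime. Below is a minimal standalone sketch of that pattern in today's Rust, using std threads and AtomicBool in place of the 2013 green-task runtime; none of these names are the libstd API in the diff.

    use std::sync::atomic::{AtomicBool, Ordering};
    use std::sync::Arc;
    use std::thread;

    fn main() {
        let killed = Arc::new(AtomicBool::new(false));
        let flag = Arc::clone(&killed);

        let task = thread::spawn(move || loop {
            // The check_killed() analogue: poll once per scheduling point.
            if flag.load(Ordering::Acquire) {
                panic!("killed"); // stands in for fail!(KILLED_MSG)
            }
            // ... one timeslice of work ...
            thread::yield_now();
        });

        killed.store(true, Ordering::Release); // deliver the kill signal
        assert!(task.join().is_err()); // the task unwound in response
    }
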
diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index 929e69d6173..1ea9c073678 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -15,7 +15,7 @@ use cell::Cell;
 use option::{Option, Some, None};
 use prelude::*;
 use rt::task::Task;
-use unstable::atomics::{AtomicUint, SeqCst};
+use unstable::atomics::{AtomicUint, Acquire, SeqCst};
 use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
 use util;
 
@@ -137,6 +137,16 @@ impl KillHandle {
         }
     }
 
+    #[inline]
+    pub fn killed(&self) -> bool {
+        // Called every context switch, so shouldn't report true if the task
+        // is unkillable with a kill signal pending.
+        let inner = unsafe { &*self.get() };
+        let flag  = unsafe { &*inner.killed.get() };
+        // FIXME(#6598): can use relaxed ordering (i think)
+        flag.load(Acquire) == KILL_KILLED
+    }
+
     pub fn notify_immediate_failure(&mut self) {
         // A benign data race may happen here if there are failing sibling
         // tasks that were also spawned-watched. The refcount's write barriers
@@ -287,6 +297,22 @@ impl Death {
         self.unkillable = 0;
     }
 
+    /// Fails if a kill signal was received.
+    #[inline]
+    pub fn check_killed(&self) {
+        match self.kill_handle {
+            Some(ref kill_handle) =>
+                // The task may be both unkillable and killed if it does some
+                // synchronization during unwinding or cleanup (for example,
+                // sending on a notify port). In that case failing won't help.
+                if self.unkillable == 0 && kill_handle.killed() {
+                    fail!(KILLED_MSG);
+                },
+            // This may happen during task death (see comments in collect_failure).
+            None => rtassert!(self.unkillable > 0),
+        }
+    }
+
     /// Enter a possibly-nested unkillable section of code.
     /// All calls must be paired with a subsequent call to allow_kill.
     #[inline]
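
The interplay between killed() and the unkillable counter above can be modeled in a few lines. This is a hypothetical simplification for illustration only: field names follow the diff, but the real KillHandle tracks a three-state atomic flag (so an unkillable task can still record a pending kill) behind reference-counted shared state.

    struct Death {
        unkillable: usize, // nesting depth of unkillable sections
        killed: bool,      // stands in for kill_handle.killed()
    }

    impl Death {
        fn check_killed(&self) {
            // A pending kill only takes effect outside unkillable sections.
            if self.unkillable == 0 && self.killed {
                panic!("task killed");
            }
        }
        fn inhibit_kill(&mut self) { self.unkillable += 1; }
        fn allow_kill(&mut self) {
            assert!(self.unkillable > 0);
            self.unkillable -= 1;
        }
    }

    fn main() {
        let mut death = Death { unkillable: 0, killed: false };
        death.inhibit_kill();
        death.killed = true;  // kill arrives inside an unkillable section...
        death.check_killed(); // ...so this check does not fail yet
        death.allow_kill();
        // death.check_killed() would panic here: unkillable == 0 and killed.
    }
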
diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs
index 4e4145ddc16..4b51508f0a4 100644
--- a/src/libstd/rt/sched.rs
+++ b/src/libstd/rt/sched.rs
@@ -483,6 +483,11 @@ impl Scheduler {
 
             // Running tasks may have asked us to do some cleanup
             (*sched).run_cleanup_job();
+
+            // Must happen after running the cleanup job (of course).
+            // Might not be running in task context; if not, a later call to
+            // resume_task_immediately will take care of this.
+            (*sched).current_task.map(|t| t.death.check_killed());
         }
     }
 
@@ -524,6 +529,9 @@ impl Scheduler {
             // We could be executing in a different thread now
             let sched = Local::unsafe_borrow::<Scheduler>();
             (*sched).run_cleanup_job();
+
+            // As above, must happen after running the cleanup job.
+            (*sched).current_task.map(|t| t.death.check_killed());
         }
     }
 
@@ -559,6 +567,9 @@ impl Scheduler {
             // We could be executing in a different thread now
             let sched = Local::unsafe_borrow::<Scheduler>();
             (*sched).run_cleanup_job();
+
+            // As above, must happen after running the cleanup job.
+            (*sched).current_task.map(|t| t.death.check_killed());
         }
     }
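
All three call sites follow the same pattern: run the pending cleanup job first, then check for a kill. A plausible reading of why the order matters (an inference; the commit only asserts the ordering "of course") is that failing before the cleanup job runs would strand whatever bookkeeping the previous context switch deferred. A toy model of that constraint, with invented names:

    struct Scheduler {
        cleanup_job: Option<Box<dyn FnOnce()>>,
        task_killed: bool, // stands in for current_task's death.check_killed()
    }

    impl Scheduler {
        fn resume(&mut self) {
            // 1. Finish the work deferred by the previous context switch.
            if let Some(job) = self.cleanup_job.take() {
                job();
            }
            // 2. Only then may the resumed task fail in response to a kill;
            //    unwinding first would leave the cleanup job unrun.
            if self.task_killed {
                panic!("task killed");
            }
        }
    }

    fn main() {
        let mut sched = Scheduler {
            cleanup_job: Some(Box::new(|| println!("cleanup ran before kill check"))),
            task_killed: false,
        };
        sched.resume();
    }
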