about summary refs log tree commit diff
diff options
context:
space:
mode:
author Ben Blum <bblum@andrew.cmu.edu> 2012-07-24 15:27:45 -0400
committer Ben Blum <bblum@andrew.cmu.edu> 2012-07-24 15:28:35 -0400
commit ae094a7adc8e0f166ea2b137c2940afdb9396bcd (patch)
tree 49d21e0d95ea214b2d3e161d2afc9a00d473032f
parent 9103e439091fbd4e5ec7e561f007172342065340 (diff)
download rust-ae094a7adc8e0f166ea2b137c2940afdb9396bcd.tar.gz
download rust-ae094a7adc8e0f166ea2b137c2940afdb9396bcd.zip
Add 'do atomically { .. }' for exclusives
-rw-r--r--  src/libcore/sys.rs       10
-rw-r--r--  src/libcore/task.rs      52
-rw-r--r--  src/rt/rust_builtin.cpp  16
-rw-r--r--  src/rt/rust_task.cpp     21
-rw-r--r--  src/rt/rust_task.h        3
-rw-r--r--  src/rt/rustrt.def.in      2
6 files changed, 91 insertions, 13 deletions
diff --git a/src/libcore/sys.rs b/src/libcore/sys.rs
index 7a23f0c3e0b..6515babcf6b 100644
--- a/src/libcore/sys.rs
+++ b/src/libcore/sys.rs
@@ -9,6 +9,8 @@ export refcount;
 export log_str;
 export lock_and_signal, condition, methods;
 
+import task::atomically;
+
 enum type_desc = {
     size: uint,
     align: uint
@@ -105,13 +107,17 @@ impl methods for lock_and_signal {
     unsafe fn lock<T>(f: fn() -> T) -> T {
         rustrt::rust_lock_cond_lock(self.lock);
         let _r = unlock(self.lock);
-        f()
+        do atomically {
+            f()
+        }
     }
 
     unsafe fn lock_cond<T>(f: fn(condition) -> T) -> T {
         rustrt::rust_lock_cond_lock(self.lock);
         let _r = unlock(self.lock);
-        f(condition_(self.lock))
+        do atomically {
+            f(condition_(self.lock))
+        }
     }
 }
 
diff --git a/src/libcore/task.rs b/src/libcore/task.rs
index 7c3037606d9..6d6f82fde07 100644
--- a/src/libcore/task.rs
+++ b/src/libcore/task.rs
@@ -60,6 +60,7 @@ export yield;
 export failing;
 export get_task;
 export unkillable;
+export atomically;
 
 export local_data_key;
 export local_data_pop;
@@ -683,16 +684,36 @@ fn get_task() -> task {
  */
 unsafe fn unkillable(f: fn()) {
     class allow_failure {
-      let i: (); // since a class must have at least one field
-      new(_i: ()) { self.i = (); }
-      drop { rustrt::rust_task_allow_kill(); }
+        let t: *rust_task;
+        new(t: *rust_task) { self.t = t; }
+        drop { rustrt::rust_task_allow_kill(self.t); }
     }
 
-    let _allow_failure = allow_failure(());
-    rustrt::rust_task_inhibit_kill();
+    let t = rustrt::rust_get_task();
+    let _allow_failure = allow_failure(t);
+    rustrt::rust_task_inhibit_kill(t);
     f();
 }
 
+/**
+ * A stronger version of unkillable that also inhibits scheduling operations.
+ * For use with exclusive ARCs, which use pthread mutexes directly.
+ */
+unsafe fn atomically<U>(f: fn() -> U) -> U {
+    class defer_interrupts {
+        let t: *rust_task;
+        new(t: *rust_task) { self.t = t; }
+        drop {
+            rustrt::rust_task_allow_yield(self.t);
+            rustrt::rust_task_allow_kill(self.t);
+        }
+    }
+    let t = rustrt::rust_get_task();
+    let _interrupts = defer_interrupts(t);
+    rustrt::rust_task_inhibit_kill(t);
+    rustrt::rust_task_inhibit_yield(t);
+    f()
+}
 
 /****************************************************************************
  * Internal
@@ -1235,8 +1256,10 @@ extern mod rustrt {
 
     fn rust_task_is_unwinding(task: *rust_task) -> bool;
     fn rust_osmain_sched_id() -> sched_id;
-    fn rust_task_inhibit_kill();
-    fn rust_task_allow_kill();
+    fn rust_task_inhibit_kill(t: *rust_task);
+    fn rust_task_allow_kill(t: *rust_task);
+    fn rust_task_inhibit_yield(t: *rust_task);
+    fn rust_task_allow_yield(t: *rust_task);
     fn rust_task_kill_other(task: *rust_task);
     fn rust_task_kill_all(task: *rust_task);
 
@@ -1759,6 +1782,21 @@ fn test_unkillable_nested() {
     po.recv();
 }
 
+#[test] #[should_fail] #[ignore(cfg(windows))]
+fn test_atomically() {
+    unsafe { do atomically { yield(); } }
+}
+
+#[test]
+fn test_atomically2() {
+    unsafe { do atomically { } } yield(); // shouldn't fail
+}
+
+#[test] #[should_fail] #[ignore(cfg(windows))]
+fn test_atomically_nested() {
+    unsafe { do atomically { do atomically { } yield(); } }
+}
+
 #[test]
 fn test_child_doesnt_ref_parent() {
     // If the child refcounts the parent task, this will stack overflow when
diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp
index f86e9232625..784df64d944 100644
--- a/src/rt/rust_builtin.cpp
+++ b/src/rt/rust_builtin.cpp
@@ -854,18 +854,26 @@ rust_global_env_chan_ptr() {
 }
 
 extern "C" void
-rust_task_inhibit_kill() {
-    rust_task *task = rust_get_current_task();
+rust_task_inhibit_kill(rust_task *task) {
     task->inhibit_kill();
 }
 
 extern "C" void
-rust_task_allow_kill() {
-    rust_task *task = rust_get_current_task();
+rust_task_allow_kill(rust_task *task) {
     task->allow_kill();
 }
 
 extern "C" void
+rust_task_inhibit_yield(rust_task *task) {
+    task->inhibit_yield();
+}
+
+extern "C" void
+rust_task_allow_yield(rust_task *task) {
+    task->allow_yield();
+}
+
+extern "C" void
 rust_task_kill_other(rust_task *task) { /* Used for linked failure */
     task->kill();
 }
diff --git a/src/rt/rust_task.cpp b/src/rt/rust_task.cpp
index c28e3350bf2..e9879525ee7 100644
--- a/src/rt/rust_task.cpp
+++ b/src/rt/rust_task.cpp
@@ -39,6 +39,7 @@ rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
     killed(false),
     reentered_rust_stack(false),
     disallow_kill(0),
+    disallow_yield(0),
     c_stack(NULL),
     next_c_sp(0),
     next_rust_sp(0)
@@ -234,9 +235,18 @@ rust_task::must_fail_from_being_killed_inner() {
     return killed && !reentered_rust_stack && disallow_kill == 0;
 }
 
+void rust_task_yield_fail(rust_task *task) {
+    LOG_ERR(task, task, "task %" PRIxPTR " yielded in an atomic section",
+            task);
+    task->fail();
+}
+
 // Only run this on the rust stack
 void
 rust_task::yield(bool *killed) {
+    if (disallow_yield > 0) {
+        call_on_c_stack(this, (void *)rust_task_yield_fail);
+    }
     // FIXME (#2875): clean this up
     if (must_fail_from_being_killed()) {
         {
@@ -672,6 +682,17 @@ rust_task::allow_kill() {
     disallow_kill--;
 }
 
+void rust_task::inhibit_yield() {
+    scoped_lock with(lifecycle_lock);
+    disallow_yield++;
+}
+
+void rust_task::allow_yield() {
+    scoped_lock with(lifecycle_lock);
+    assert(disallow_yield > 0 && "Illegal allow_yield(): already yieldable!");
+    disallow_yield--;
+}
+
 void *
 rust_task::wait_event(bool *killed) {
     scoped_lock with(lifecycle_lock);
diff --git a/src/rt/rust_task.h b/src/rt/rust_task.h
index 2db882d11e3..985e1ebe6e8 100644
--- a/src/rt/rust_task.h
+++ b/src/rt/rust_task.h
@@ -185,6 +185,7 @@ private:
     // Indicates that we've called back into Rust from C
     bool reentered_rust_stack;
     unsigned long disallow_kill;
+    unsigned long disallow_yield;
 
     // The stack used for running C code, borrowed from the scheduler thread
     stk_seg *c_stack;
@@ -318,6 +319,8 @@ public:
 
     void inhibit_kill();
     void allow_kill();
+    void inhibit_yield();
+    void allow_yield();
 };
 
 // FIXME (#2697): It would be really nice to be able to get rid of this.
diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in
index 2079db61466..62fb8482ad5 100644
--- a/src/rt/rustrt.def.in
+++ b/src/rt/rustrt.def.in
@@ -183,6 +183,8 @@ rust_port_drop
 rust_port_task
 rust_task_inhibit_kill
 rust_task_allow_kill
+rust_task_inhibit_yield
+rust_task_allow_yield
 rust_task_kill_other
 rust_task_kill_all
 rust_create_cond_lock