about summary refs log tree commit diff
path: root/src/libstd/rt/task.rs
diff options
context:
space:
mode:
author    bors <bors@rust-lang.org> 2013-07-09 18:28:46 -0700
committer bors <bors@rust-lang.org> 2013-07-09 18:28:46 -0700
commit    41dcec2fe16e272016ae77d10a6a5ff3a737f192 (patch)
tree      6eebc49e7033a0d696c93c8e23d7caeb28d4eca1 /src/libstd/rt/task.rs
parent    137d1fb210a844a76f89d7355a1aaf9f7a88af33 (diff)
parent    413d51e32debf0c3f7dda2434b64d73585df21ef (diff)
download  rust-41dcec2fe16e272016ae77d10a6a5ff3a737f192.tar.gz
download  rust-41dcec2fe16e272016ae77d10a6a5ff3a737f192.zip
auto merge of #7265 : brson/rust/io-upstream, r=brson
r? @graydon, @nikomatsakis, @pcwalton, or @catamorphism

Sorry this is so huge, but it's been accumulating for about a month. There's lots of stuff here, mostly oriented toward enabling multithreaded scheduling and improving compatibility between the old and new runtimes. Adds task pinning so that we can create the 'platform thread' in servo.

[Here](https://github.com/brson/rust/blob/e1555f9b5628af2b6c6ed344cad621399cb7684d/src/libstd/rt/mod.rs#L201) is the current runtime setup code.

About half of this has already been reviewed.
Diffstat (limited to 'src/libstd/rt/task.rs')
-rw-r--r--  src/libstd/rt/task.rs  284
1 file changed, 264 insertions, 20 deletions
diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs
index f5f5aca71f5..b4f4c1b3e35 100644
--- a/src/libstd/rt/task.rs
+++ b/src/libstd/rt/task.rs
@@ -15,20 +15,45 @@
 
 use borrow;
 use cast::transmute;
+use cleanup;
 use libc::{c_void, uintptr_t};
 use ptr;
 use prelude::*;
+use option::{Option, Some, None};
 use rt::local::Local;
 use rt::logging::StdErrLogger;
 use super::local_heap::LocalHeap;
+use rt::sched::{Scheduler, SchedHandle};
+use rt::join_latch::JoinLatch;
+use rt::stack::{StackSegment, StackPool};
+use rt::context::Context;
+use cell::Cell;
 
 pub struct Task {
     heap: LocalHeap,
     gc: GarbageCollector,
     storage: LocalStorage,
     logger: StdErrLogger,
-    unwinder: Option<Unwinder>,
-    destroyed: bool
+    unwinder: Unwinder,
+    home: Option<SchedHome>,
+    join_latch: Option<~JoinLatch>,
+    on_exit: Option<~fn(bool)>,
+    destroyed: bool,
+    coroutine: Option<~Coroutine>
+}
+
+pub struct Coroutine {
+    /// The segment of stack on which the task is currently running or
+    /// if the task is blocked, on which the task will resume
+    /// execution.
+    priv current_stack_segment: StackSegment,
+    /// Always valid if the task is alive and not running.
+    saved_context: Context
+}
+
+pub enum SchedHome {
+    AnySched,
+    Sched(SchedHandle)
 }
 
 pub struct GarbageCollector;
@@ -39,73 +64,227 @@ pub struct Unwinder {
 }
 
 impl Task {
-    pub fn new() -> Task {
+
+    pub fn new_root(stack_pool: &mut StackPool,
+                    start: ~fn()) -> Task {
+        Task::new_root_homed(stack_pool, AnySched, start)
+    }
+
+    pub fn new_child(&mut self,
+                     stack_pool: &mut StackPool,
+                     start: ~fn()) -> Task {
+        self.new_child_homed(stack_pool, AnySched, start)
+    }
+
+    pub fn new_root_homed(stack_pool: &mut StackPool,
+                          home: SchedHome,
+                          start: ~fn()) -> Task {
         Task {
             heap: LocalHeap::new(),
             gc: GarbageCollector,
             storage: LocalStorage(ptr::null(), None),
             logger: StdErrLogger,
-            unwinder: Some(Unwinder { unwinding: false }),
-            destroyed: false
+            unwinder: Unwinder { unwinding: false },
+            home: Some(home),
+            join_latch: Some(JoinLatch::new_root()),
+            on_exit: None,
+            destroyed: false,
+            coroutine: Some(~Coroutine::new(stack_pool, start))
         }
     }
 
-    pub fn without_unwinding() -> Task {
+    pub fn new_child_homed(&mut self,
+                           stack_pool: &mut StackPool,
+                           home: SchedHome,
+                           start: ~fn()) -> Task {
         Task {
             heap: LocalHeap::new(),
             gc: GarbageCollector,
             storage: LocalStorage(ptr::null(), None),
             logger: StdErrLogger,
-            unwinder: None,
-            destroyed: false
+            home: Some(home),
+            unwinder: Unwinder { unwinding: false },
+            join_latch: Some(self.join_latch.get_mut_ref().new_child()),
+            on_exit: None,
+            destroyed: false,
+            coroutine: Some(~Coroutine::new(stack_pool, start))
         }
     }
 
+    pub fn give_home(&mut self, new_home: SchedHome) {
+        self.home = Some(new_home);
+    }
+
     pub fn run(&mut self, f: &fn()) {
         // This is just an assertion that `run` was called unsafely
         // and this instance of Task is still accessible.
-        do Local::borrow::<Task> |task| {
+        do Local::borrow::<Task, ()> |task| {
             assert!(borrow::ref_eq(task, self));
         }
 
-        match self.unwinder {
-            Some(ref mut unwinder) => {
-                // If there's an unwinder then set up the catch block
-                unwinder.try(f);
+        self.unwinder.try(f);
+        self.destroy();
+
+        // Wait for children. Possibly report the exit status.
+        let local_success = !self.unwinder.unwinding;
+        let join_latch = self.join_latch.swap_unwrap();
+        match self.on_exit {
+            Some(ref on_exit) => {
+                let success = join_latch.wait(local_success);
+                (*on_exit)(success);
             }
             None => {
-                // Otherwise, just run the body
-                f()
+                join_latch.release(local_success);
             }
         }
-        self.destroy();
     }
 
-    /// Must be called manually before finalization to clean up
+    /// must be called manually before finalization to clean up
     /// thread-local resources. Some of the routines here expect
     /// Task to be available recursively so this must be
     /// called unsafely, without removing Task from
     /// thread-local-storage.
     fn destroy(&mut self) {
-        // This is just an assertion that `destroy` was called unsafely
-        // and this instance of Task is still accessible.
-        do Local::borrow::<Task> |task| {
+
+        do Local::borrow::<Task, ()> |task| {
             assert!(borrow::ref_eq(task, self));
         }
+
         match self.storage {
             LocalStorage(ptr, Some(ref dtor)) => {
                 (*dtor)(ptr)
             }
             _ => ()
         }
+
+        // Destroy remaining boxes
+        unsafe { cleanup::annihilate(); }
+
         self.destroyed = true;
     }
+
+    /// Check if *task* is currently home.
+    pub fn is_home(&self) -> bool {
+        do Local::borrow::<Scheduler,bool> |sched| {
+            match self.home {
+                Some(AnySched) => { false }
+                Some(Sched(SchedHandle { sched_id: ref id, _ })) => {
+                    *id == sched.sched_id()
+                }
+                None => { rtabort!("task home of None") }
+            }
+        }
+    }
+
+    pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool {
+        match self.home {
+            Some(AnySched) => { false }
+            Some(Sched(SchedHandle { sched_id: ref id, _ })) => {
+                *id == sched.sched_id()
+            }
+            None => {rtabort!("task home of None") }
+        }
+    }
+
+    pub fn is_home_using_id(sched_id: uint) -> bool {
+        do Local::borrow::<Task,bool> |task| {
+            match task.home {
+                Some(Sched(SchedHandle { sched_id: ref id, _ })) => {
+                    *id == sched_id
+                }
+                Some(AnySched) => { false }
+                None => { rtabort!("task home of None") }
+            }
+        }
+    }
+
+    /// Check if this *task* has a home.
+    pub fn homed(&self) -> bool {
+        match self.home {
+            Some(AnySched) => { false }
+            Some(Sched(_)) => { true }
+            None => {
+                rtabort!("task home of None")
+            }
+        }
+    }
+
+    /// On a special scheduler?
+    pub fn on_special() -> bool {
+        do Local::borrow::<Scheduler,bool> |sched| {
+            !sched.run_anything
+        }
+    }
+
 }
 
 impl Drop for Task {
     fn drop(&self) { assert!(self.destroyed) }
 }
 
+// Coroutines represent nothing more than a context and a stack
+// segment.
+
+impl Coroutine {
+
+    pub fn new(stack_pool: &mut StackPool, start: ~fn()) -> Coroutine {
+        static MIN_STACK_SIZE: uint = 100000; // XXX: Too much stack
+
+        let start = Coroutine::build_start_wrapper(start);
+        let mut stack = stack_pool.take_segment(MIN_STACK_SIZE);
+        let initial_context = Context::new(start, &mut stack);
+        Coroutine {
+            current_stack_segment: stack,
+            saved_context: initial_context
+        }
+    }
+
+    fn build_start_wrapper(start: ~fn()) -> ~fn() {
+        let start_cell = Cell::new(start);
+        let wrapper: ~fn() = || {
+            // First code after swap to this new context. Run our
+            // cleanup job.
+            unsafe {
+                let sched = Local::unsafe_borrow::<Scheduler>();
+                (*sched).run_cleanup_job();
+
+                let sched = Local::unsafe_borrow::<Scheduler>();
+                let task = (*sched).current_task.get_mut_ref();
+
+                do task.run {
+                    // N.B. Removing `start` from the start wrapper
+                    // closure by emptying a cell is critical for
+                    // correctness. The ~Task pointer, and in turn the
+                    // closure used to initialize the first call
+                    // frame, is destroyed in the scheduler context,
+                    // not task context. So any captured closures must
+                    // not contain user-definable dtors that expect to
+                    // be in task context. By moving `start` out of
+                    // the closure, all the user code goes out of
+                    // scope while the task is still running.
+                    let start = start_cell.take();
+                    start();
+                };
+            }
+
+            let sched = Local::take::<Scheduler>();
+            sched.terminate_current_task();
+        };
+        return wrapper;
+    }
+
+    /// Destroy coroutine and try to reuse stack segment.
+    pub fn recycle(~self, stack_pool: &mut StackPool) {
+        match self {
+            ~Coroutine { current_stack_segment, _ } => {
+                stack_pool.give_segment(current_stack_segment);
+            }
+        }
+    }
+
+}
+
+
 // Just a sanity check to make sure we are catching a Rust-thrown exception
 static UNWIND_TOKEN: uintptr_t = 839147;
 
@@ -184,8 +363,10 @@ mod test {
     fn unwind() {
         do run_in_newsched_task() {
             let result = spawntask_try(||());
+            rtdebug!("trying first assert");
             assert!(result.is_ok());
             let result = spawntask_try(|| fail!());
+            rtdebug!("trying second assert");
             assert!(result.is_err());
         }
     }
@@ -227,4 +408,67 @@ mod test {
             assert!(port.recv() == 10);
         }
     }
+
+    #[test]
+    fn comm_shared_chan() {
+        use comm::*;
+
+        do run_in_newsched_task() {
+            let (port, chan) = stream();
+            let chan = SharedChan::new(chan);
+            chan.send(10);
+            assert!(port.recv() == 10);
+        }
+    }
+
+    #[test]
+    fn linked_failure() {
+        do run_in_newsched_task() {
+            let res = do spawntask_try {
+                spawntask_random(|| fail!());
+            };
+            assert!(res.is_err());
+        }
+    }
+
+    #[test]
+    fn heap_cycles() {
+        use option::{Option, Some, None};
+
+        do run_in_newsched_task {
+            struct List {
+                next: Option<@mut List>,
+            }
+
+            let a = @mut List { next: None };
+            let b = @mut List { next: Some(a) };
+
+            a.next = Some(b);
+        }
+    }
+
+    // XXX: This is a copy of test_future_result in std::task.
+    // It can be removed once the scheduler is turned on by default.
+    #[test]
+    fn future_result() {
+        do run_in_newsched_task {
+            use option::{Some, None};
+            use task::*;
+
+            let mut result = None;
+            let mut builder = task();
+            builder.future_result(|r| result = Some(r));
+            do builder.spawn {}
+            assert_eq!(result.unwrap().recv(), Success);
+
+            result = None;
+            let mut builder = task();
+            builder.future_result(|r| result = Some(r));
+            builder.unlinked();
+            do builder.spawn {
+                fail!();
+            }
+            assert_eq!(result.unwrap().recv(), Failure);
+        }
+    }
 }