| field | value | date |
|---|---|---|
| author | Alex Crichton <alex@alexcrichton.com> | 2013-12-12 18:01:59 -0800 |
| committer | Alex Crichton <alex@alexcrichton.com> | 2013-12-24 19:59:52 -0800 |
| commit | 51abdee5f1ad932671350fdd8a7911fe144d08b8 (patch) | |
| tree | e65726bf152c97cb9854a3e13b3818c0ecde5493 /src/libstd/rt | |
| parent | 6aadc9d18856f8e7ea8038e2c4b2ba0f9507e26a (diff) | |
| download | rust-51abdee5f1ad932671350fdd8a7911fe144d08b8.tar.gz, rust-51abdee5f1ad932671350fdd8a7911fe144d08b8.zip | |
green: Rip the bandaid off, introduce libgreen
This extracts everything related to green scheduling from libstd and introduces a new libgreen crate. This mostly involves deleting most of std::rt and moving it to libgreen.

Along with the movement of code, this commit rearchitects many functions in the scheduler in order to adapt to the fact that Local::take now *only* works on a Task, not a scheduler. This mostly just involved threading the current green task through in a few locations, but there were one or two spots where things got hairy.

There are a few repercussions of this commit:

* tube/rc have been removed (the runtime implementation of rc)
* There is no longer a "single threaded" spawning mode for tasks. This is now encompassed by 1:1 scheduling + communication. Convenience methods have been introduced that are specific to libgreen to assist in the spawning of pools of schedulers.
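The `Local::take` point above is the crux of the rearchitecture: thread-local storage now holds only a `Task`, and the scheduler rides along inside it, so any scheduler access must thread through the owning task. A minimal sketch of that ownership shape in modern Rust, with `Task` and `Scheduler` as hypothetical stand-ins for the runtime's types (the real code manages raw TLS pointers and `~`-boxes by hand):

```rust
use std::cell::RefCell;

struct Scheduler { id: usize }

struct Task {
    name: &'static str,
    // The scheduler lives inside the task; it is no longer a
    // first-class resident of TLS in its own right.
    sched: Option<Box<Scheduler>>,
}

thread_local! {
    // Only a Task may occupy the thread-local slot.
    static LOCAL: RefCell<Option<Box<Task>>> = RefCell::new(None);
}

/// Take the local task out of TLS; the caller is responsible for
/// putting it back. This is the only `Local::take`-style operation left.
fn take_task() -> Box<Task> {
    LOCAL.with(|slot| slot.borrow_mut().take().expect("no local task"))
}

fn put_task(task: Box<Task>) {
    LOCAL.with(|slot| *slot.borrow_mut() = Some(task));
}

fn main() {
    put_task(Box::new(Task {
        name: "green",
        sched: Some(Box::new(Scheduler { id: 0 })),
    }));
    // "Borrowing the scheduler" now means: take the task, reach through it.
    let mut task = take_task();
    let sched_id = task.sched.as_mut().map(|s| s.id).expect("no scheduler");
    println!("task {} is on scheduler {}", task.name, sched_id);
    put_task(task);
}
```

This mirrors the `BorrowedScheduler` helper deleted from `src/libstd/rt/local.rs` below, which likewise reached the scheduler only through the task stored in TLS.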
Diffstat (limited to 'src/libstd/rt')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/libstd/rt/basic.rs | 230 |
| -rw-r--r-- | src/libstd/rt/borrowck.rs | 11 |
| -rw-r--r-- | src/libstd/rt/context.rs | 463 |
| -rw-r--r-- | src/libstd/rt/env.rs | 2 |
| -rw-r--r-- | src/libstd/rt/kill.rs | 318 |
| -rw-r--r-- | src/libstd/rt/local.rs | 79 |
| -rw-r--r-- | src/libstd/rt/mod.rs | 64 |
| -rw-r--r-- | src/libstd/rt/rc.rs | 139 |
| -rw-r--r-- | src/libstd/rt/rtio.rs | 11 |
| -rw-r--r-- | src/libstd/rt/sched.rs | 1395 |
| -rw-r--r-- | src/libstd/rt/sleeper_list.rs | 47 |
| -rw-r--r-- | src/libstd/rt/stack.rs | 78 |
| -rw-r--r-- | src/libstd/rt/task.rs | 673 |
| -rw-r--r-- | src/libstd/rt/thread.rs | 6 |
| -rw-r--r-- | src/libstd/rt/tube.rs | 170 |
| -rw-r--r-- | src/libstd/rt/unwind.rs | 72 |
| -rw-r--r-- | src/libstd/rt/util.rs | 11 |
17 files changed, 327 insertions(+), 3442 deletions(-)
diff --git a/src/libstd/rt/basic.rs b/src/libstd/rt/basic.rs deleted file mode 100644 index 3589582357c..00000000000 --- a/src/libstd/rt/basic.rs +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This is a basic event loop implementation not meant for any "real purposes" -//! other than testing the scheduler and proving that it's possible to have a -//! pluggable event loop. - -use prelude::*; - -use cast; -use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback, - Callback}; -use unstable::sync::Exclusive; -use io::native; -use util; - -/// This is the only exported function from this module. -pub fn event_loop() -> ~EventLoop { - ~BasicLoop::new() as ~EventLoop -} - -struct BasicLoop { - work: ~[proc()], // pending work - idle: Option<*mut BasicPausable>, // only one is allowed - remotes: ~[(uint, ~Callback)], - next_remote: uint, - messages: Exclusive<~[Message]>, - io: ~IoFactory, -} - -enum Message { RunRemote(uint), RemoveRemote(uint) } - -impl BasicLoop { - fn new() -> BasicLoop { - BasicLoop { - work: ~[], - idle: None, - next_remote: 0, - remotes: ~[], - messages: Exclusive::new(~[]), - io: ~native::IoFactory as ~IoFactory, - } - } - - /// Process everything in the work queue (continually) - fn work(&mut self) { - while self.work.len() > 0 { - for work in util::replace(&mut self.work, ~[]).move_iter() { - work(); - } - } - } - - fn remote_work(&mut self) { - let messages = unsafe { - self.messages.with(|messages| { - if messages.len() > 0 { - Some(util::replace(messages, ~[])) - } else { - None - } - }) - }; - let messages = match messages { - Some(m) => m, None => return - }; - for message in messages.iter() { - self.message(*message); - } - } - - fn message(&mut self, message: Message) { - match message { - RunRemote(i) => { - match self.remotes.mut_iter().find(|& &(id, _)| id == i) { - Some(&(_, ref mut f)) => f.call(), - None => unreachable!() - } - } - RemoveRemote(i) => { - match self.remotes.iter().position(|&(id, _)| id == i) { - Some(i) => { self.remotes.remove(i); } - None => unreachable!() - } - } - } - } - - /// Run the idle callback if one is registered - fn idle(&mut self) { - unsafe { - match self.idle { - Some(idle) => { - if (*idle).active { - (*idle).work.call(); - } - } - None => {} - } - } - } - - fn has_idle(&self) -> bool { - unsafe { self.idle.is_some() && (**self.idle.get_ref()).active } - } -} - -impl EventLoop for BasicLoop { - fn run(&mut self) { - // Not exactly efficient, but it gets the job done. - while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() { - - self.work(); - self.remote_work(); - - if self.has_idle() { - self.idle(); - continue - } - - unsafe { - // We block here if we have no messages to process and we may - // receive a message at a later date - self.messages.hold_and_wait(|messages| { - self.remotes.len() > 0 && - messages.len() == 0 && - self.work.len() == 0 - }) - } - } - } - - fn callback(&mut self, f: proc()) { - self.work.push(f); - } - - // XXX: Seems like a really weird requirement to have an event loop provide. 
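Stripped of the remote-callback and idle machinery, the `BasicLoop` deleted above is a queue of closures drained in a loop. A compressed sketch of that core in modern Rust, with a simplified `EventLoop` trait standing in for the real `rt::rtio` one (`proc()` becomes `Box<dyn FnOnce()>`):

```rust
use std::collections::VecDeque;

trait EventLoop {
    fn callback(&mut self, f: Box<dyn FnOnce()>);
    fn run(&mut self);
}

struct BasicLoop {
    work: VecDeque<Box<dyn FnOnce()>>, // pending work, as in `work: ~[proc()]`
}

impl EventLoop for BasicLoop {
    fn callback(&mut self, f: Box<dyn FnOnce()>) {
        self.work.push_back(f);
    }

    // "Not exactly efficient, but it gets the job done": keep draining
    // until no callback has queued any further work.
    fn run(&mut self) {
        while let Some(job) = self.work.pop_front() {
            job();
        }
    }
}

fn main() {
    let mut el = BasicLoop { work: VecDeque::new() };
    el.callback(Box::new(|| println!("first tick")));
    el.callback(Box::new(|| println!("second tick")));
    el.run();
}
```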
- fn pausable_idle_callback(&mut self, cb: ~Callback) -> ~PausableIdleCallback { - let callback = ~BasicPausable::new(self, cb); - rtassert!(self.idle.is_none()); - unsafe { - let cb_ptr: &*mut BasicPausable = cast::transmute(&callback); - self.idle = Some(*cb_ptr); - } - return callback as ~PausableIdleCallback; - } - - fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { - let id = self.next_remote; - self.next_remote += 1; - self.remotes.push((id, f)); - ~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback - } - - fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { - let factory: &mut IoFactory = self.io; - Some(factory) - } -} - -struct BasicRemote { - queue: Exclusive<~[Message]>, - id: uint, -} - -impl BasicRemote { - fn new(queue: Exclusive<~[Message]>, id: uint) -> BasicRemote { - BasicRemote { queue: queue, id: id } - } -} - -impl RemoteCallback for BasicRemote { - fn fire(&mut self) { - unsafe { - self.queue.hold_and_signal(|queue| { - queue.push(RunRemote(self.id)); - }) - } - } -} - -impl Drop for BasicRemote { - fn drop(&mut self) { - unsafe { - self.queue.hold_and_signal(|queue| { - queue.push(RemoveRemote(self.id)); - }) - } - } -} - -struct BasicPausable { - eloop: *mut BasicLoop, - work: ~Callback, - active: bool, -} - -impl BasicPausable { - fn new(eloop: &mut BasicLoop, cb: ~Callback) -> BasicPausable { - BasicPausable { - active: false, - work: cb, - eloop: eloop, - } - } -} - -impl PausableIdleCallback for BasicPausable { - fn pause(&mut self) { - self.active = false; - } - fn resume(&mut self) { - self.active = true; - } -} - -impl Drop for BasicPausable { - fn drop(&mut self) { - unsafe { - (*self.eloop).idle = None; - } - } -} diff --git a/src/libstd/rt/borrowck.rs b/src/libstd/rt/borrowck.rs index 423981d9e91..d1e97cb6ec0 100644 --- a/src/libstd/rt/borrowck.rs +++ b/src/libstd/rt/borrowck.rs @@ -12,9 +12,8 @@ use c_str::{ToCStr, CString}; use libc::{c_char, size_t}; use option::{Option, None, Some}; use ptr::RawPtr; -use rt::env; +use rt; use rt::local::Local; -use rt::task; use rt::task::Task; use str::OwnedStr; use str; @@ -62,7 +61,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t) match try_take_task_borrow_list() { None => { // not recording borrows let msg = "borrowed"; - msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line)) + msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line)) } Some(borrow_list) => { // recording borrows let mut msg = ~"borrowed"; @@ -76,7 +75,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t) sep = " and at "; } } - msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line)) + msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line)) } } } @@ -95,7 +94,7 @@ unsafe fn debug_borrow<T,P:RawPtr<T>>(tag: &'static str, //! A useful debugging function that prints a pointer + tag + newline //! without allocating memory. 
- if ENABLE_DEBUG && env::debug_borrow() { + if ENABLE_DEBUG && rt::env::debug_borrow() { debug_borrow_slow(tag, p, old_bits, new_bits, filename, line); } @@ -180,7 +179,7 @@ pub unsafe fn unrecord_borrow(a: *u8, if br.alloc != a || br.file != file || br.line != line { let err = format!("wrong borrow found, br={:?}", br); err.with_c_str(|msg_p| { - task::begin_unwind_raw(msg_p, file, line) + rt::begin_unwind_raw(msg_p, file, line) }) } borrow_list diff --git a/src/libstd/rt/context.rs b/src/libstd/rt/context.rs deleted file mode 100644 index 31cf0696881..00000000000 --- a/src/libstd/rt/context.rs +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use option::*; -use super::stack::StackSegment; -use libc::c_void; -use uint; -use cast::{transmute, transmute_mut_unsafe, - transmute_region, transmute_mut_region}; - -pub static RED_ZONE: uint = 20 * 1024; - -// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing -// SSE regs. It would be marginally better not to do this. In C++ we -// use an attribute on a struct. -// FIXME #7761: It would be nice to define regs as `~Option<Registers>` since -// the registers are sometimes empty, but the discriminant would -// then misalign the regs again. -pub struct Context { - /// The context entry point, saved here for later destruction - priv start: Option<~proc()>, - /// Hold the registers while the task or scheduler is suspended - priv regs: ~Registers, - /// Lower bound and upper bound for the stack - priv stack_bounds: Option<(uint, uint)>, -} - -impl Context { - pub fn empty() -> Context { - Context { - start: None, - regs: new_regs(), - stack_bounds: None, - } - } - - /// Create a new context that will resume execution by running proc() - pub fn new(start: proc(), stack: &mut StackSegment) -> Context { - // FIXME #7767: Putting main into a ~ so it's a thin pointer and can - // be passed to the spawn function. Another unfortunate - // allocation - let start = ~start; - - // The C-ABI function that is the task entry point - extern fn task_start_wrapper(f: &proc()) { - // XXX(pcwalton): This may be sketchy. - unsafe { - let f: &|| = transmute(f); - (*f)() - } - } - - let fp: *c_void = task_start_wrapper as *c_void; - let argp: *c_void = unsafe { transmute::<&proc(), *c_void>(&*start) }; - let sp: *uint = stack.end(); - let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) }; - // Save and then immediately load the current context, - // which we will then modify to call the given function when restored - let mut regs = new_regs(); - unsafe { - rust_swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs)); - }; - - initialize_call_frame(&mut *regs, fp, argp, sp); - - // Scheduler tasks don't have a stack in the "we allocated it" sense, - // but rather they run on pthreads stacks. We have complete control over - // them in terms of the code running on them (and hopefully they don't - // overflow). Additionally, their coroutine stacks are listed as being - // zero-length, so that's how we detect what's what here. 
- let stack_base: *uint = stack.start(); - let bounds = if sp as uint == stack_base as uint { - None - } else { - Some((stack_base as uint, sp as uint)) - }; - return Context { - start: Some(start), - regs: regs, - stack_bounds: bounds, - } - } - - /* Switch contexts - - Suspend the current execution context and resume another by - saving the registers values of the executing thread to a Context - then loading the registers from a previously saved Context. - */ - pub fn swap(out_context: &mut Context, in_context: &Context) { - rtdebug!("swapping contexts"); - let out_regs: &mut Registers = match out_context { - &Context { regs: ~ref mut r, .. } => r - }; - let in_regs: &Registers = match in_context { - &Context { regs: ~ref r, .. } => r - }; - - rtdebug!("noting the stack limit and doing raw swap"); - - unsafe { - // Right before we switch to the new context, set the new context's - // stack limit in the OS-specified TLS slot. This also means that - // we cannot call any more rust functions after record_stack_bounds - // returns because they would all likely fail due to the limit being - // invalid for the current task. Lucky for us `rust_swap_registers` - // is a C function so we don't have to worry about that! - match in_context.stack_bounds { - Some((lo, hi)) => record_stack_bounds(lo, hi), - // If we're going back to one of the original contexts or - // something that's possibly not a "normal task", then reset - // the stack limit to 0 to make morestack never fail - None => record_stack_bounds(0, uint::max_value), - } - rust_swap_registers(out_regs, in_regs) - } - } -} - -extern { - fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers); -} - -// Register contexts used in various architectures -// -// These structures all represent a context of one task throughout its -// execution. Each struct is a representation of the architecture's register -// set. When swapping between tasks, these register sets are used to save off -// the current registers into one struct, and load them all from another. -// -// Note that this is only used for context switching, which means that some of -// the registers may go unused. For example, for architectures with -// callee/caller saved registers, the context will only reflect the callee-saved -// registers. This is because the caller saved registers are already stored -// elsewhere on the stack (if it was necessary anyway). -// -// Additionally, there may be fields on various architectures which are unused -// entirely because they only reflect what is theoretically possible for a -// "complete register set" to show, but user-space cannot alter these registers. -// An example of this would be the segment selectors for x86. -// -// These structures/functions are roughly in-sync with the source files inside -// of src/rt/arch/$arch. The only currently used function from those folders is -// the `rust_swap_registers` function, but that's only because for now segmented -// stacks are disabled. 
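The per-architecture `initialize_call_frame` variants that follow all share one recipe: align the new stack pointer, write a zero "final return address" at the bottom, and seed the register array with the entry point, its argument, and the stack pointer. The arithmetic can be exercised without any assembly; this sketch redoes the x86_64 variant against a plain array, reusing the `RUSTRT_*` slot indices defined below (the addresses in `main` are illustrative only):

```rust
// Slot indices mirroring the redefinitions from rt/arch/x86_64/regs.h.
const RUSTRT_ARG0: usize = 3;
const RUSTRT_RSP: usize = 1;
const RUSTRT_IP: usize = 8;
const RUSTRT_RBP: usize = 2;

fn align_down(sp: usize) -> usize {
    sp & !(16 - 1) // 16-byte alignment, as in the deleted helper
}

fn initialize_call_frame(regs: &mut [usize; 22], fptr: usize, arg: usize, stack_top: usize) {
    let mut sp = align_down(stack_top);
    sp -= std::mem::size_of::<usize>(); // room for the final return address
    // The real code stores 0 through `sp` at this point: 0 marks the
    // bottom of the stack, so a backtrace knows where to stop.
    regs[RUSTRT_ARG0] = arg;
    regs[RUSTRT_RSP] = sp;
    regs[RUSTRT_IP] = fptr;
    regs[RUSTRT_RBP] = 0; // last base pointer on the stack is 0
}

fn main() {
    let mut regs = [0usize; 22];
    initialize_call_frame(&mut regs, 0xdead_beef, 0x42, 0x7fff_0008);
    println!(
        "ip={:#x} arg={:#x} sp={:#x}",
        regs[RUSTRT_IP], regs[RUSTRT_ARG0], regs[RUSTRT_RSP]
    );
}
```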
- -#[cfg(target_arch = "x86")] -struct Registers { - eax: u32, ebx: u32, ecx: u32, edx: u32, - ebp: u32, esi: u32, edi: u32, esp: u32, - cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16, - eflags: u32, eip: u32 -} - -#[cfg(target_arch = "x86")] -fn new_regs() -> ~Registers { - ~Registers { - eax: 0, ebx: 0, ecx: 0, edx: 0, - ebp: 0, esi: 0, edi: 0, esp: 0, - cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0, - eflags: 0, eip: 0 - } -} - -#[cfg(target_arch = "x86")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - - let sp = align_down(sp); - let sp = mut_offset(sp, -4); - - unsafe { *sp = arg as uint }; - let sp = mut_offset(sp, -1); - unsafe { *sp = 0 }; // The final return address - - regs.esp = sp as u32; - regs.eip = fptr as u32; - - // Last base pointer on the stack is 0 - regs.ebp = 0; -} - -// windows requires saving more registers (both general and XMM), so the windows -// register context must be larger. -#[cfg(windows, target_arch = "x86_64")] -type Registers = [uint, ..34]; -#[cfg(not(windows), target_arch = "x86_64")] -type Registers = [uint, ..22]; - -#[cfg(windows, target_arch = "x86_64")] -fn new_regs() -> ~Registers { ~([0, .. 34]) } -#[cfg(not(windows), target_arch = "x86_64")] -fn new_regs() -> ~Registers { ~([0, .. 22]) } - -#[cfg(target_arch = "x86_64")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - - // Redefinitions from rt/arch/x86_64/regs.h - static RUSTRT_ARG0: uint = 3; - static RUSTRT_RSP: uint = 1; - static RUSTRT_IP: uint = 8; - static RUSTRT_RBP: uint = 2; - - let sp = align_down(sp); - let sp = mut_offset(sp, -1); - - // The final return address. 0 indicates the bottom of the stack - unsafe { *sp = 0; } - - rtdebug!("creating call frame"); - rtdebug!("fptr {}", fptr); - rtdebug!("arg {}", arg); - rtdebug!("sp {}", sp); - - regs[RUSTRT_ARG0] = arg as uint; - regs[RUSTRT_RSP] = sp as uint; - regs[RUSTRT_IP] = fptr as uint; - - // Last base pointer on the stack should be 0 - regs[RUSTRT_RBP] = 0; -} - -#[cfg(target_arch = "arm")] -type Registers = [uint, ..32]; - -#[cfg(target_arch = "arm")] -fn new_regs() -> ~Registers { ~([0, .. 32]) } - -#[cfg(target_arch = "arm")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - let sp = align_down(sp); - // sp of arm eabi is 8-byte aligned - let sp = mut_offset(sp, -2); - - // The final return address. 0 indicates the bottom of the stack - unsafe { *sp = 0; } - - regs[0] = arg as uint; // r0 - regs[13] = sp as uint; // #53 sp, r13 - regs[14] = fptr as uint; // #60 pc, r15 --> lr -} - -#[cfg(target_arch = "mips")] -type Registers = [uint, ..32]; - -#[cfg(target_arch = "mips")] -fn new_regs() -> ~Registers { ~([0, .. 32]) } - -#[cfg(target_arch = "mips")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - let sp = align_down(sp); - // sp of mips o32 is 8-byte aligned - let sp = mut_offset(sp, -2); - - // The final return address. 
0 indicates the bottom of the stack - unsafe { *sp = 0; } - - regs[4] = arg as uint; - regs[29] = sp as uint; - regs[25] = fptr as uint; - regs[31] = fptr as uint; -} - -fn align_down(sp: *mut uint) -> *mut uint { - unsafe { - let sp: uint = transmute(sp); - let sp = sp & !(16 - 1); - transmute::<uint, *mut uint>(sp) - } -} - -// ptr::mut_offset is positive ints only -#[inline] -pub fn mut_offset<T>(ptr: *mut T, count: int) -> *mut T { - use mem::size_of; - (ptr as int + count * (size_of::<T>() as int)) as *mut T -} - -#[inline(always)] -pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) { - // When the old runtime had segmented stacks, it used a calculation that was - // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic - // symbol resolution, llvm function calls, etc. In theory this red zone - // value is 0, but it matters far less when we have gigantic stacks because - // we don't need to be so exact about our stack budget. The "fudge factor" - // was because LLVM doesn't emit a stack check for functions < 256 bytes in - // size. Again though, we have giant stacks, so we round all these - // calculations up to the nice round number of 20k. - record_sp_limit(stack_lo + RED_ZONE); - - return target_record_stack_bounds(stack_lo, stack_hi); - - #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)] - unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {} - #[cfg(windows, target_arch = "x86_64")] #[inline(always)] - unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) { - // Windows compiles C functions which may check the stack bounds. This - // means that if we want to perform valid FFI on windows, then we need - // to ensure that the stack bounds are what they truly are for this - // task. More info can be found at: - // https://github.com/mozilla/rust/issues/3445#issuecomment-26114839 - // - // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom) - asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile"); - asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile"); - } -} - -/// Records the current limit of the stack as specified by `end`. -/// -/// This is stored in an OS-dependent location, likely inside of the thread -/// local storage. The location that the limit is stored is a pre-ordained -/// location because it's where LLVM has emitted code to check. -/// -/// Note that this cannot be called under normal circumstances. This function is -/// changing the stack limit, so upon returning any further function calls will -/// possibly be triggering the morestack logic if you're not careful. -/// -/// Also note that this and all of the inside functions are all flagged as -/// "inline(always)" because they're messing around with the stack limits. This -/// would be unfortunate for the functions themselves to trigger a morestack -/// invocation (if they were an actual function call). 
-#[inline(always)] -pub unsafe fn record_sp_limit(limit: uint) { - return target_record_sp_limit(limit); - - // x86-64 - #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $$0x60+90*8, %rsi - movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block - // store this inside of the "arbitrary data slot", but double the size - // because this is 64 bit instead of 32 bit - asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile") - } - - // x86 - #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movl $$0x48+90*4, %eax - movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile") - } - #[cfg(target_arch = "x86", target_os = "linux")] - #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block - // store this inside of the "arbitrary data slot" - asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile") - } - - // mips, arm - Some brave soul can port these to inline asm, but it's over - // my head personally - #[cfg(target_arch = "mips")] - #[cfg(target_arch = "arm")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - return record_sp_limit(limit as *c_void); - extern { - fn record_sp_limit(limit: *c_void); - } - } -} - -/// The counterpart of the function above, this function will fetch the current -/// stack limit stored in TLS. -/// -/// Note that all of these functions are meant to be exact counterparts of their -/// brethren above, except that the operands are reversed. -/// -/// As with the setter, this function does not have a __morestack header and can -/// therefore be called in a "we're out of stack" situation. -#[inline(always)] -// currently only called by `rust_stack_exhausted`, which doesn't -// exist in a test build. 
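The `record_sp_limit` assembly above writes the limit into whichever TLS slot LLVM's `__morestack` prologue is hard-wired to check on that platform. The bookkeeping itself is simple, which a portable sketch can show; note that Rust's `thread_local!` storage is *not* the pre-ordained slot the generated stack checks read, so this stands in for the idea only:

```rust
use std::cell::Cell;

const RED_ZONE: usize = 20 * 1024; // as defined at the top of context.rs

thread_local! {
    // Portable stand-in for e.g. %fs:112 on x86-64 Linux.
    static SP_LIMIT: Cell<usize> = Cell::new(0);
}

fn record_sp_limit(limit: usize) {
    SP_LIMIT.with(|s| s.set(limit));
}

fn get_sp_limit() -> usize {
    SP_LIMIT.with(|s| s.get())
}

fn main() {
    let stack_lo = 0x1000_0000usize; // illustrative address
    // record_stack_bounds() above does exactly this before switching in:
    record_sp_limit(stack_lo + RED_ZONE);
    println!("current limit = {:#x}", get_sp_limit());
}
```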
-#[cfg(not(test))] -pub unsafe fn get_sp_limit() -> uint { - return target_get_sp_limit(); - - // x86-64 - #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq $$0x60+90*8, %rsi - movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - - // x86 - #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl $$0x48+90*4, %eax - movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile"); - return limit; - } - #[cfg(target_arch = "x86", target_os = "linux")] - #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - - // mips, arm - Some brave soul can port these to inline asm, but it's over - // my head personally - #[cfg(target_arch = "mips")] - #[cfg(target_arch = "arm")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - return get_sp_limit() as uint; - extern { - fn get_sp_limit() -> *c_void; - } - } -} diff --git a/src/libstd/rt/env.rs b/src/libstd/rt/env.rs index d1bd450afe2..f3fa482b18c 100644 --- a/src/libstd/rt/env.rs +++ b/src/libstd/rt/env.rs @@ -17,7 +17,7 @@ use os; // Note that these are all accessed without any synchronization. // They are expected to be initialized once then left alone. -static mut MIN_STACK: uint = 2000000; +static mut MIN_STACK: uint = 2 * 1024 * 1024; static mut DEBUG_BORROW: bool = false; static mut POISON_ON_FREE: bool = false; diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs deleted file mode 100644 index f4f128cf5aa..00000000000 --- a/src/libstd/rt/kill.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - -Task death: asynchronous killing, linked failure, exit code propagation. - -This file implements two orthogonal building-blocks for communicating failure -between tasks. One is 'linked failure' or 'task killing', that is, a failing -task causing other tasks to fail promptly (even those that are blocked on -pipes or I/O). 
The other is 'exit code propagation', which affects the result -observed by the parent of a task::try task that itself spawns child tasks -(such as any #[test] function). In both cases the data structures live in -KillHandle. - - -I. Task killing. - -The model for killing involves two atomic flags, the "kill flag" and the -"unkillable flag". Operations on the kill flag include: - -- In the taskgroup code (task/spawn.rs), tasks store a clone of their - KillHandle in their shared taskgroup. Another task in the group that fails - will use that handle to call kill(). -- When a task blocks, it turns its ~Task into a BlockedTask by storing a - the transmuted ~Task pointer inside the KillHandle's kill flag. A task - trying to block and a task trying to kill it can simultaneously access the - kill flag, after which the task will get scheduled and fail (no matter who - wins the race). Likewise, a task trying to wake a blocked task normally and - a task trying to kill it can simultaneously access the flag; only one will - get the task to reschedule it. - -Operations on the unkillable flag include: - -- When a task becomes unkillable, it swaps on the flag to forbid any killer - from waking it up while it's blocked inside the unkillable section. If a - kill was already pending, the task fails instead of becoming unkillable. -- When a task is done being unkillable, it restores the flag to the normal - running state. If a kill was received-but-blocked during the unkillable - section, the task fails at this later point. -- When a task tries to kill another task, before swapping on the kill flag, it - first swaps on the unkillable flag, to see if it's "allowed" to wake up the - task. If it isn't, the killed task will receive the signal when it becomes - killable again. (Of course, a task trying to wake the task normally (e.g. - sending on a channel) does not access the unkillable flag at all.) - -Why do we not need acquire/release barriers on any of the kill flag swaps? -This is because barriers establish orderings between accesses on different -memory locations, but each kill-related operation is only a swap on a single -location, so atomicity is all that matters. The exception is kill(), which -does a swap on both flags in sequence. kill() needs no barriers because it -does not matter if its two accesses are seen reordered on another CPU: if a -killer does perform both writes, it means it saw a KILL_RUNNING in the -unkillable flag, which means an unkillable task will see KILL_KILLED and fail -immediately (rendering the subsequent write to the kill flag unnecessary). - - -II. Exit code propagation. - -The basic model for exit code propagation, which is used with the "watched" -spawn mode (on by default for linked spawns, off for supervised and unlinked -spawns), is that a parent will wait for all its watched children to exit -before reporting whether it succeeded or failed. A watching parent will only -report success if it succeeded and all its children also reported success; -otherwise, it will report failure. This is most useful for writing test cases: - - ``` -#[test] -fn test_something_in_another_task { - do spawn { - assert!(collatz_conjecture_is_false()); - } -} - ``` - -Here, as the child task will certainly outlive the parent task, we might miss -the failure of the child when deciding whether or not the test case passed. -The watched spawn mode avoids this problem. 
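The kill-flag half of the model described above rests entirely on atomic `swap`: a blocking task parks its `~Task` pointer in the flag, and whichever party swaps it out first (a normal waker or a killer) owns the wakeup. A compressed sketch of just that race in modern Rust, with `KILLED` as a reserved sentinel value and the separate unkillable flag omitted:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

const RUNNING: usize = 0;
const KILLED: usize = 1; // boxes are word-aligned, so a real pointer is never 1

struct Task { name: &'static str }

/// Park: publish our task pointer in the flag, unless a kill already landed.
/// Returns the task back if we must fail immediately instead of blocking.
fn block(flag: &AtomicUsize, task: Box<Task>) -> Option<Box<Task>> {
    let ptr = Box::into_raw(task) as usize;
    match flag.swap(ptr, Ordering::SeqCst) {
        RUNNING => None, // parked; a waker or killer will claim the pointer
        KILLED => {
            flag.store(KILLED, Ordering::SeqCst); // put the sentinel back
            Some(unsafe { Box::from_raw(ptr as *mut Task) }) // fail now
        }
        _ => unreachable!("two tasks blocked on one flag"),
    }
}

/// Wake: whoever swaps the pointer out first gets the task; losers get None.
fn wake(flag: &AtomicUsize) -> Option<Box<Task>> {
    match flag.swap(RUNNING, Ordering::SeqCst) {
        RUNNING | KILLED => None,
        ptr => Some(unsafe { Box::from_raw(ptr as *mut Task) }),
    }
}

fn main() {
    let flag = AtomicUsize::new(RUNNING);
    assert!(block(&flag, Box::new(Task { name: "t" })).is_none());
    let woken = wake(&flag).expect("we parked it, so we can claim it");
    println!("woke {}", woken.name);
}
```

As the text notes, nothing beyond the atomicity of each single-word exchange is required here; ordering between the two flags only matters in `kill()` itself.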
- -In order to propagate exit codes from children to their parents, any -'watching' parent must wait for all of its children to exit before it can -report its final exit status. We achieve this by using an UnsafeArc, using the -reference counting to track how many children are still alive, and using the -unwrap() operation in the parent's exit path to wait for all children to exit. -The UnsafeArc referred to here is actually the KillHandle itself. - -This also works transitively, as if a "middle" watched child task is itself -watching a grandchild task, the "middle" task will do unwrap() on its own -KillHandle (thereby waiting for the grandchild to exit) before dropping its -reference to its watching parent (which will alert the parent). - -While UnsafeArc::unwrap() accomplishes the synchronization, there remains the -matter of reporting the exit codes themselves. This is easiest when an exiting -watched task has no watched children of its own: - -- If the task with no watched children exits successfully, it need do nothing. -- If the task with no watched children has failed, it sets a flag in the - parent's KillHandle ("any_child_failed") to false. It then stays false forever. - -However, if a "middle" watched task with watched children of its own exits -before its child exits, we need to ensure that the grandparent task may still -see a failure from the grandchild task. While we could achieve this by having -each intermediate task block on its handle, this keeps around the other resources -the task was using. To be more efficient, this is accomplished via "tombstones". - -A tombstone is a closure, proc() -> bool, which will perform any waiting necessary -to collect the exit code of descendant tasks. In its environment is captured -the KillHandle of whichever task created the tombstone, and perhaps also any -tombstones that that task itself had, and finally also another tombstone, -effectively creating a lazy-list of heap closures. - -When a child wishes to exit early and leave tombstones behind for its parent, -it must use a LittleLock (pthread mutex) to synchronize with any possible -sibling tasks which are trying to do the same thing with the same parent. -However, on the other side, when the parent is ready to pull on the tombstones, -it need not use this lock, because the unwrap() serves as a barrier that ensures -no children will remain with references to the handle. - -The main logic for creating and assigning tombstones can be found in the -function reparent_children_to() in the impl for KillHandle. - - -IIA. Issues with exit code propagation. - -There are two known issues with the current scheme for exit code propagation. - -- As documented in issue #8136, the structure mandates the possibility for stack - overflow when collecting tombstones that are very deeply nested. This cannot - be avoided with the closure representation, as tombstones end up structured in - a sort of tree. However, notably, the tombstones do not actually need to be - collected in any particular order, and so a doubly-linked list may be used. - However we do not do this yet because DList is in libextra. - -- A discussion with Graydon made me realize that if we decoupled the exit code - propagation from the parents-waiting action, this could result in a simpler - implementation as the exit codes themselves would not have to be propagated, - and could instead be propagated implicitly through the taskgroup mechanism - that we already have. The tombstoning scheme would still be required. 
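Concretely, a tombstone chain like the one described is a lazy list of boxed closures, each capturing the chain that existed before it. A minimal sketch with `Box<dyn FnOnce() -> bool>` standing in for `proc() -> bool`:

```rust
type Tombstone = Box<dyn FnOnce() -> bool>;

/// Prepend one exiting child's result onto the existing (optional) chain.
fn add_tombstone(prev: Option<Tombstone>, child_succeeded: bool) -> Option<Tombstone> {
    Some(Box::new(move || {
        // Pull on the rest of the chain first, then fold in this child.
        let rest_ok = prev.map_or(true, |t| t());
        rest_ok && child_succeeded
    }))
}

fn main() {
    let mut chain: Option<Tombstone> = None;
    chain = add_tombstone(chain, true);
    chain = add_tombstone(chain, false); // one descendant failed
    chain = add_tombstone(chain, true);
    // The watching parent pulls on the whole chain in its exit path.
    let any_child_failed = !chain.map_or(true, |t| t());
    assert!(any_child_failed);
    println!("any_child_failed = {}", any_child_failed);
}
```

The stack-overflow hazard mentioned above (issue #8136) is visible even in this toy: pulling the chain recurses once per tombstone, so a deeply nested tree of exits can exhaust the stack.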
I have - not implemented this because currently we can't receive a linked failure kill - signal during the task cleanup activity, as that is currently "unkillable", - and occurs outside the task's unwinder's "try" block, so would require some - restructuring. - -*/ - -use cast; -use option::{Option, Some, None}; -use prelude::*; -use iter; -use task::TaskResult; -use rt::task::Task; -use unstable::atomics::{AtomicUint, SeqCst}; -use unstable::sync::UnsafeArc; - -/// A handle to a blocked task. Usually this means having the ~Task pointer by -/// ownership, but if the task is killable, a killer can steal it at any time. -pub enum BlockedTask { - Owned(~Task), - Shared(UnsafeArc<AtomicUint>), -} - -/// Per-task state related to task death, killing, failure, etc. -pub struct Death { - // Action to be done with the exit code. If set, also makes the task wait - // until all its watched children exit before collecting the status. - on_exit: Option<proc(TaskResult)>, - // nesting level counter for unstable::atomically calls (0 == can deschedule). - priv wont_sleep: int, -} - -pub struct BlockedTaskIterator { - priv inner: UnsafeArc<AtomicUint>, -} - -impl Iterator<BlockedTask> for BlockedTaskIterator { - fn next(&mut self) -> Option<BlockedTask> { - Some(Shared(self.inner.clone())) - } -} - -impl BlockedTask { - /// Returns Some if the task was successfully woken; None if already killed. - pub fn wake(self) -> Option<~Task> { - match self { - Owned(task) => Some(task), - Shared(arc) => unsafe { - match (*arc.get()).swap(0, SeqCst) { - 0 => None, - n => cast::transmute(n), - } - } - } - } - - /// Create a blocked task, unless the task was already killed. - pub fn block(task: ~Task) -> BlockedTask { - Owned(task) - } - - /// Converts one blocked task handle to a list of many handles to the same. - pub fn make_selectable(self, num_handles: uint) - -> iter::Take<BlockedTaskIterator> - { - let arc = match self { - Owned(task) => { - let flag = unsafe { AtomicUint::new(cast::transmute(task)) }; - UnsafeArc::new(flag) - } - Shared(arc) => arc.clone(), - }; - BlockedTaskIterator{ inner: arc }.take(num_handles) - } - - // This assertion has two flavours because the wake involves an atomic op. - // In the faster version, destructors will fail dramatically instead. - #[inline] #[cfg(not(test))] - pub fn assert_already_awake(self) { } - #[inline] #[cfg(test)] - pub fn assert_already_awake(self) { assert!(self.wake().is_none()); } - - /// Convert to an unsafe uint value. Useful for storing in a pipe's state flag. - #[inline] - pub unsafe fn cast_to_uint(self) -> uint { - match self { - Owned(task) => { - let blocked_task_ptr: uint = cast::transmute(task); - rtassert!(blocked_task_ptr & 0x1 == 0); - blocked_task_ptr - } - Shared(arc) => { - let blocked_task_ptr: uint = cast::transmute(~arc); - rtassert!(blocked_task_ptr & 0x1 == 0); - blocked_task_ptr | 0x1 - } - } - } - - /// Convert from an unsafe uint value. Useful for retrieving a pipe's state flag. - #[inline] - pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask { - if blocked_task_ptr & 0x1 == 0 { - Owned(cast::transmute(blocked_task_ptr)) - } else { - let ptr: ~UnsafeArc<AtomicUint> = cast::transmute(blocked_task_ptr & !1); - Shared(*ptr) - } - } -} - -impl Death { - pub fn new() -> Death { - Death { - on_exit: None, - wont_sleep: 0, - } - } - - /// Collect failure exit codes from children and propagate them to a parent. 
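The `cast_to_uint`/`cast_from_uint` pair above is a pointer-tagging scheme: both variants box word-aligned data, so bit 0 of the pointer is always free to encode which variant was stored. The same trick in modern Rust, with hypothetical payload types in place of `~Task` and `UnsafeArc<AtomicUint>`:

```rust
use std::sync::Arc;

enum BlockedTask {
    Owned(Box<u32>),  // stand-in for Owned(~Task)
    Shared(Arc<u32>), // stand-in for Shared(UnsafeArc<AtomicUint>)
}

impl BlockedTask {
    /// Pack the variant into bit 0 of the pointer itself.
    unsafe fn cast_to_usize(self) -> usize {
        match self {
            BlockedTask::Owned(task) => {
                let p = Box::into_raw(task) as usize;
                assert!(p & 1 == 0, "allocations are word-aligned");
                p
            }
            BlockedTask::Shared(arc) => {
                let p = Arc::into_raw(arc) as usize;
                assert!(p & 1 == 0, "allocations are word-aligned");
                p | 1
            }
        }
    }

    /// Recover the variant by inspecting (and masking off) bit 0.
    unsafe fn cast_from_usize(bits: usize) -> BlockedTask {
        if bits & 1 == 0 {
            BlockedTask::Owned(Box::from_raw(bits as *mut u32))
        } else {
            BlockedTask::Shared(Arc::from_raw((bits & !1) as *const u32))
        }
    }
}

fn main() {
    unsafe {
        let bits = BlockedTask::Owned(Box::new(7)).cast_to_usize();
        match BlockedTask::cast_from_usize(bits) {
            BlockedTask::Owned(v) => println!("round-tripped owned({})", v),
            BlockedTask::Shared(_) => unreachable!(),
        }
    }
}
```

The original additionally boxes the arc (`~arc`) before transmuting, because its `UnsafeArc` is a struct rather than a raw pointer; `Arc::into_raw` makes that extra indirection unnecessary here.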
- pub fn collect_failure(&mut self, result: TaskResult) { - match self.on_exit.take() { - Some(f) => f(result), - None => {} - } - } - - /// Enter a possibly-nested "atomic" section of code. Just for assertions. - /// All calls must be paired with a subsequent call to allow_deschedule. - #[inline] - pub fn inhibit_deschedule(&mut self) { - self.wont_sleep += 1; - } - - /// Exit a possibly-nested "atomic" section of code. Just for assertions. - /// All calls must be paired with a preceding call to inhibit_deschedule. - #[inline] - pub fn allow_deschedule(&mut self) { - rtassert!(self.wont_sleep != 0); - self.wont_sleep -= 1; - } - - /// Ensure that the task is allowed to become descheduled. - #[inline] - pub fn assert_may_sleep(&self) { - if self.wont_sleep != 0 { - rtabort!("illegal atomic-sleep: attempt to reschedule while \ - using an Exclusive or LittleLock"); - } - } -} - -impl Drop for Death { - fn drop(&mut self) { - // Mustn't be in an atomic or unkillable section at task death. - rtassert!(self.wont_sleep == 0); - } -} - -#[cfg(test)] -mod test { - use rt::test::*; - use super::*; - - // Task blocking tests - - #[test] - fn block_and_wake() { - do with_test_task |task| { - BlockedTask::block(task).wake().unwrap() - } - } -} diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index d73ad98a25b..ea27956ad90 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -8,8 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use option::{Option, Some, None}; -use rt::sched::Scheduler; +use option::Option; use rt::task::Task; use rt::local_ptr; @@ -46,82 +45,6 @@ impl Local<local_ptr::Borrowed<Task>> for Task { } } -/// Encapsulates a temporarily-borrowed scheduler. -pub struct BorrowedScheduler { - priv task: local_ptr::Borrowed<Task>, -} - -impl BorrowedScheduler { - fn new(mut task: local_ptr::Borrowed<Task>) -> BorrowedScheduler { - if task.get().sched.is_none() { - rtabort!("no scheduler") - } else { - BorrowedScheduler { - task: task, - } - } - } - - #[inline] - pub fn get<'a>(&'a mut self) -> &'a mut ~Scheduler { - match self.task.get().sched { - None => rtabort!("no scheduler"), - Some(ref mut sched) => sched, - } - } -} - -impl Local<BorrowedScheduler> for Scheduler { - fn put(value: ~Scheduler) { - let mut task = Local::borrow(None::<Task>); - task.get().sched = Some(value); - } - #[inline] - fn take() -> ~Scheduler { - unsafe { - // XXX: Unsafe for speed - let task: *mut Task = Local::unsafe_borrow(); - (*task).sched.take_unwrap() - } - } - fn exists(_: Option<Scheduler>) -> bool { - let mut task = Local::borrow(None::<Task>); - task.get().sched.is_some() - } - #[inline] - fn borrow(_: Option<Scheduler>) -> BorrowedScheduler { - BorrowedScheduler::new(Local::borrow(None::<Task>)) - } - unsafe fn unsafe_take() -> ~Scheduler { rtabort!("unimpl") } - unsafe fn unsafe_borrow() -> *mut Scheduler { - let task: *mut Task = Local::unsafe_borrow(); - match (*task).sched { - Some(~ref mut sched) => { - let s: *mut Scheduler = &mut *sched; - return s; - } - None => { - rtabort!("no scheduler") - } - } - } - unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { - let task_opt: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_opt { - Some(task) => { - match (*task).sched { - Some(~ref mut sched) => { - let s: *mut Scheduler = &mut *sched; - Some(s) - } - None => None - } - } - None => None - } - } -} - #[cfg(test)] mod test { use option::None; diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs 
index be35e7579b7..d0c062c1274 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -57,27 +57,17 @@ Several modules in `core` are clients of `rt`:
 // XXX: this should not be here.
 #[allow(missing_doc)];
 
+use any::Any;
 use clone::Clone;
 use container::Container;
 use iter::Iterator;
-use option::{Option, None, Some};
+use option::Option;
 use ptr::RawPtr;
-use rt::local::Local;
-use rt::sched::{Scheduler, Shutdown};
-use rt::sleeper_list::SleeperList;
-use task::TaskResult;
-use rt::task::{Task, SchedTask, GreenTask, Sched};
-use send_str::SendStrStatic;
-use unstable::atomics::{AtomicInt, AtomicBool, SeqCst};
-use unstable::sync::UnsafeArc;
+use result::Result;
+use task::TaskOpts;
 use vec::{OwnedVector, MutableVector, ImmutableVector};
-use vec;
-use self::thread::Thread;
-
-// the os module needs to reach into this helper, so allow general access
-// through this reexport.
-pub use self::util::set_exit_status;
+use self::task::{Task, BlockedTask};
 
 // this is somewhat useful when a program wants to spawn a "reasonable" number
 // of workers based on the constraints of the system that it's running on.
@@ -85,8 +75,8 @@ pub use self::util::set_exit_status;
 // method...
 pub use self::util::default_sched_threads;
 
-// Re-export of the functionality in the kill module
-pub use self::kill::BlockedTask;
+// Export unwinding facilities used by the failure macros
+pub use self::unwind::{begin_unwind, begin_unwind_raw};
 
 // XXX: these probably shouldn't be public...
 #[doc(hidden)]
@@ -99,21 +89,12 @@ pub mod shouldnt_be_public {
 // Internal macros used by the runtime.
 mod macros;
 
-/// Basic implementation of an EventLoop, provides no I/O interfaces
-mod basic;
-
 /// The global (exchange) heap.
 pub mod global_heap;
 
 /// Implementations of language-critical runtime features like @.
 pub mod task;
 
-/// Facilities related to task failure, killing, and death.
-mod kill;
-
-/// The coroutine task scheduler, built on the `io` event loop.
-pub mod sched;
-
 /// The EventLoop and internal synchronous I/O interface.
 pub mod rtio;
 
@@ -121,27 +102,6 @@ pub mod rtio;
 /// or task-local storage.
 pub mod local;
 
-/// A mostly lock-free multi-producer, single consumer queue.
-pub mod mpsc_queue;
-
-/// A lock-free single-producer, single consumer queue.
-pub mod spsc_queue;
-
-/// A lock-free multi-producer, multi-consumer bounded queue.
-mod mpmc_bounded_queue;
-
-/// A parallel work-stealing deque
-pub mod deque;
-
-/// A parallel data structure for tracking sleeping schedulers.
-pub mod sleeper_list;
-
-/// Stack segments and caching.
-pub mod stack;
-
-/// CPU context swapping.
-mod context;
-
 /// Bindings to system threading libraries.
 pub mod thread;
 
@@ -157,16 +117,6 @@ pub mod logging;
 /// Crate map
 pub mod crate_map;
 
-/// Tools for testing the runtime
-pub mod test;
-
-/// Reference counting
-pub mod rc;
-
-/// A simple single-threaded channel type for passing buffered data between
-/// scheduler and task context
-pub mod tube;
-
 /// The runtime needs to be able to put a pointer into thread-local storage.
 mod local_ptr;
 
diff --git a/src/libstd/rt/rc.rs b/src/libstd/rt/rc.rs
deleted file mode 100644
index 2699dab6d38..00000000000
--- a/src/libstd/rt/rc.rs
+++ /dev/null
@@ -1,139 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! An owned, task-local, reference counted type -//! -//! # Safety note -//! -//! XXX There is currently no type-system mechanism for enforcing that -//! reference counted types are both allocated on the exchange heap -//! and also non-sendable -//! -//! This doesn't prevent borrowing multiple aliasable mutable pointers - -use ops::Drop; -use clone::Clone; -use libc::c_void; -use cast; - -pub struct RC<T> { - priv p: *c_void // ~(uint, T) -} - -impl<T> RC<T> { - pub fn new(val: T) -> RC<T> { - unsafe { - let v = ~(1, val); - let p: *c_void = cast::transmute(v); - RC { p: p } - } - } - - fn get_mut_state(&mut self) -> *mut (uint, T) { - unsafe { - let p: &mut ~(uint, T) = cast::transmute(&mut self.p); - let p: *mut (uint, T) = &mut **p; - return p; - } - } - - fn get_state(&self) -> *(uint, T) { - unsafe { - let p: &~(uint, T) = cast::transmute(&self.p); - let p: *(uint, T) = &**p; - return p; - } - } - - pub fn unsafe_borrow_mut(&mut self) -> *mut T { - unsafe { - match *self.get_mut_state() { - (_, ref mut p) => { - let p: *mut T = p; - return p; - } - } - } - } - - pub fn refcount(&self) -> uint { - unsafe { - match *self.get_state() { - (count, _) => count - } - } - } -} - -#[unsafe_destructor] -impl<T> Drop for RC<T> { - fn drop(&mut self) { - assert!(self.refcount() > 0); - - unsafe { - match *self.get_mut_state() { - (ref mut count, _) => { - *count = *count - 1 - } - } - - if self.refcount() == 0 { - let _: ~(uint, T) = cast::transmute(self.p); - } - } - } -} - -impl<T> Clone for RC<T> { - fn clone(&self) -> RC<T> { - unsafe { - // XXX: Mutable clone - let this: &mut RC<T> = cast::transmute_mut(self); - - match *this.get_mut_state() { - (ref mut count, _) => { - *count = *count + 1; - } - } - } - - RC { p: self.p } - } -} - -#[cfg(test)] -mod test { - use super::RC; - - #[test] - fn smoke_test() { - unsafe { - let mut v1 = RC::new(100); - assert!(*v1.unsafe_borrow_mut() == 100); - assert!(v1.refcount() == 1); - - let mut v2 = v1.clone(); - assert!(*v2.unsafe_borrow_mut() == 100); - assert!(v2.refcount() == 2); - - *v2.unsafe_borrow_mut() = 200; - assert!(*v2.unsafe_borrow_mut() == 200); - assert!(*v1.unsafe_borrow_mut() == 200); - - let v3 = v2.clone(); - assert!(v3.refcount() == 3); - { - let _v1 = v1; - let _v2 = v2; - } - assert!(v3.refcount() == 1); - } - } -} diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index 7207c1a8134..c1c40cc6dff 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -14,14 +14,15 @@ use comm::{SharedChan, Port}; use libc::c_int; use libc; use ops::Drop; -use option::*; +use option::{Option, Some, None}; use path::Path; -use result::*; +use result::{Result, Ok, Err}; +use rt::task::Task; +use rt::local::Local; use ai = io::net::addrinfo; +use io; use io::IoError; -use io::native::NATIVE_IO_FACTORY; -use io::native; use io::net::ip::{IpAddr, SocketAddr}; use io::process::{ProcessConfig, ProcessExit}; use io::signal::Signum; @@ -149,6 +150,8 @@ impl<'a> LocalIo<'a> { } pub trait IoFactory { + fn id(&self) -> uint; + // networking fn tcp_connect(&mut self, addr: SocketAddr) -> Result<~RtioTcpStream, IoError>; fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError>; diff --git a/src/libstd/rt/sched.rs 
b/src/libstd/rt/sched.rs deleted file mode 100644 index 15aa1602cd0..00000000000 --- a/src/libstd/rt/sched.rs +++ /dev/null @@ -1,1395 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use option::{Option, Some, None}; -use cast::{transmute, transmute_mut_region, transmute_mut_unsafe}; -use clone::Clone; -use unstable::raw; -use super::sleeper_list::SleeperList; -use super::stack::{StackPool}; -use super::rtio::EventLoop; -use super::context::Context; -use super::task::{Task, AnySched, Sched}; -use rt::kill::BlockedTask; -use rt::deque; -use rt::local_ptr; -use rt::local::Local; -use rt::rtio::{RemoteCallback, PausableIdleCallback, Callback}; -use borrow::{to_uint}; -use rand::{XorShiftRng, Rng, Rand}; -use iter::range; -use unstable::mutex::Mutex; -use vec::{OwnedVector}; - -use mpsc = super::mpsc_queue; - -/// A scheduler is responsible for coordinating the execution of Tasks -/// on a single thread. The scheduler runs inside a slightly modified -/// Rust Task. When not running this task is stored in the scheduler -/// struct. The scheduler struct acts like a baton, all scheduling -/// actions are transfers of the baton. -/// -/// XXX: This creates too many callbacks to run_sched_once, resulting -/// in too much allocation and too many events. -pub struct Scheduler { - /// There are N work queues, one per scheduler. - work_queue: deque::Worker<~Task>, - /// Work queues for the other schedulers. These are created by - /// cloning the core work queues. - work_queues: ~[deque::Stealer<~Task>], - /// The queue of incoming messages from other schedulers. - /// These are enqueued by SchedHandles after which a remote callback - /// is triggered to handle the message. - message_queue: mpsc::Consumer<SchedMessage, ()>, - /// Producer used to clone sched handles from - message_producer: mpsc::Producer<SchedMessage, ()>, - /// A shared list of sleeping schedulers. We'll use this to wake - /// up schedulers when pushing work onto the work queue. - sleeper_list: SleeperList, - /// Indicates that we have previously pushed a handle onto the - /// SleeperList but have not yet received the Wake message. - /// Being `true` does not necessarily mean that the scheduler is - /// not active since there are multiple event sources that may - /// wake the scheduler. It just prevents the scheduler from pushing - /// multiple handles onto the sleeper list. - sleepy: bool, - /// A flag to indicate we've received the shutdown message and should - /// no longer try to go to sleep, but exit instead. - no_sleep: bool, - stack_pool: StackPool, - /// The scheduler runs on a special task. When it is not running - /// it is stored here instead of the work queue. - sched_task: Option<~Task>, - /// An action performed after a context switch on behalf of the - /// code running before the context switch - cleanup_job: Option<CleanupJob>, - /// Should this scheduler run any task, or only pinned tasks? - run_anything: bool, - /// If the scheduler shouldn't run some tasks, a friend to send - /// them to. 
- friend_handle: Option<SchedHandle>, - /// A fast XorShift rng for scheduler use - rng: XorShiftRng, - /// A togglable idle callback - idle_callback: Option<~PausableIdleCallback>, - /// A countdown that starts at a random value and is decremented - /// every time a yield check is performed. When it hits 0 a task - /// will yield. - yield_check_count: uint, - /// A flag to tell the scheduler loop it needs to do some stealing - /// in order to introduce randomness as part of a yield - steal_for_yield: bool, - - // n.b. currently destructors of an object are run in top-to-bottom in order - // of field declaration. Due to its nature, the pausable idle callback - // must have some sort of handle to the event loop, so it needs to get - // destroyed before the event loop itself. For this reason, we destroy - // the event loop last to ensure that any unsafe references to it are - // destroyed before it's actually destroyed. - - /// The event loop used to drive the scheduler and perform I/O - event_loop: ~EventLoop, -} - -/// An indication of how hard to work on a given operation, the difference -/// mainly being whether memory is synchronized or not -#[deriving(Eq)] -enum EffortLevel { - DontTryTooHard, - GiveItYourBest -} - -static MAX_YIELD_CHECKS: uint = 20000; - -fn reset_yield_check(rng: &mut XorShiftRng) -> uint { - let r: uint = Rand::rand(rng); - r % MAX_YIELD_CHECKS + 1 -} - -impl Scheduler { - - // * Initialization Functions - - pub fn new(event_loop: ~EventLoop, - work_queue: deque::Worker<~Task>, - work_queues: ~[deque::Stealer<~Task>], - sleeper_list: SleeperList) - -> Scheduler { - - Scheduler::new_special(event_loop, work_queue, - work_queues, - sleeper_list, true, None) - - } - - pub fn new_special(event_loop: ~EventLoop, - work_queue: deque::Worker<~Task>, - work_queues: ~[deque::Stealer<~Task>], - sleeper_list: SleeperList, - run_anything: bool, - friend: Option<SchedHandle>) - -> Scheduler { - - let (consumer, producer) = mpsc::queue(()); - let mut sched = Scheduler { - sleeper_list: sleeper_list, - message_queue: consumer, - message_producer: producer, - sleepy: false, - no_sleep: false, - event_loop: event_loop, - work_queue: work_queue, - work_queues: work_queues, - stack_pool: StackPool::new(), - sched_task: None, - cleanup_job: None, - run_anything: run_anything, - friend_handle: friend, - rng: new_sched_rng(), - idle_callback: None, - yield_check_count: 0, - steal_for_yield: false - }; - - sched.yield_check_count = reset_yield_check(&mut sched.rng); - - return sched; - } - - // XXX: This may eventually need to be refactored so that - // the scheduler itself doesn't have to call event_loop.run. - // That will be important for embedding the runtime into external - // event loops. - - // Take a main task to run, and a scheduler to run it in. Create a - // scheduler task and bootstrap into it. - pub fn bootstrap(mut ~self, task: ~Task) { - - // Build an Idle callback. - let cb = ~SchedRunner as ~Callback; - self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb)); - - // Initialize the TLS key. - local_ptr::init(); - - // Create a task for the scheduler with an empty context. - let sched_task = ~Task::new_sched_task(); - - // Now that we have an empty task struct for the scheduler - // task, put it in TLS. - Local::put(sched_task); - - // Before starting our first task, make sure the idle callback - // is active. As we do not start in the sleep state this is - // important. 
- self.idle_callback.get_mut_ref().resume(); - - // Now, as far as all the scheduler state is concerned, we are - // inside the "scheduler" context. So we can act like the - // scheduler and resume the provided task. - self.resume_task_immediately(task); - - // Now we are back in the scheduler context, having - // successfully run the input task. Start by running the - // scheduler. Grab it out of TLS - performing the scheduler - // action will have given it away. - let sched: ~Scheduler = Local::take(); - - rtdebug!("starting scheduler {}", sched.sched_id()); - sched.run(); - - // Close the idle callback. - let mut sched: ~Scheduler = Local::take(); - sched.idle_callback.take(); - // Make one go through the loop to run the close callback. - sched.run(); - - // Now that we are done with the scheduler, clean up the - // scheduler task. Do so by removing it from TLS and manually - // cleaning up the memory it uses. As we didn't actually call - // task.run() on the scheduler task we never get through all - // the cleanup code it runs. - let mut stask: ~Task = Local::take(); - - rtdebug!("stopping scheduler {}", stask.sched.get_ref().sched_id()); - - // Should not have any messages - let message = stask.sched.get_mut_ref().message_queue.pop(); - rtassert!(match message { mpsc::Empty => true, _ => false }); - - stask.destroyed = true; - } - - // This does not return a scheduler, as the scheduler is placed - // inside the task. - pub fn run(mut ~self) { - - // This is unsafe because we need to place the scheduler, with - // the event_loop inside, inside our task. But we still need a - // mutable reference to the event_loop to give it the "run" - // command. - unsafe { - let event_loop: *mut ~EventLoop = &mut self.event_loop; - - { - // Our scheduler must be in the task before the event loop - // is started. - let mut stask = Local::borrow(None::<Task>); - stask.get().sched = Some(self); - } - - (*event_loop).run(); - } - } - - // * Execution Functions - Core Loop Logic - - // The model for this function is that you continue through it - // until you either use the scheduler while performing a schedule - // action, in which case you give it away and return early, or - // you reach the end and sleep. In the case that a scheduler - // action is performed the loop is evented such that this function - // is called again. - fn run_sched_once() { - - // When we reach the scheduler context via the event loop we - // already have a scheduler stored in our local task, so we - // start off by taking it. This is the only path through the - // scheduler where we get the scheduler this way. - let mut sched: ~Scheduler = Local::take(); - - // Assume that we need to continue idling unless we reach the - // end of this function without performing an action. - sched.idle_callback.get_mut_ref().resume(); - - // First we check for scheduler messages, these are higher - // priority than regular tasks. - let sched = match sched.interpret_message_queue(DontTryTooHard) { - Some(sched) => sched, - None => return - }; - - // This helper will use a randomized work-stealing algorithm - // to find work. - let sched = match sched.do_work() { - Some(sched) => sched, - None => return - }; - - // Now, before sleeping we need to find out if there really - // were any messages. Give it your best! - let mut sched = match sched.interpret_message_queue(GiveItYourBest) { - Some(sched) => sched, - None => return - }; - - // If we got here then there was no work to do. 
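The sleep path that follows this point (push a handle onto the shared sleeper list, pause the idle callback, and wait to be woken) is a parking handshake between schedulers. A thread-based sketch, with a mutex-guarded `Vec` standing in for `SleeperList` and an mpsc sender standing in for the `SchedHandle` that carries the `Wake` message:

```rust
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;

type SleeperList = Arc<Mutex<Vec<Sender<()>>>>; // stand-in for rt::sleeper_list

fn main() {
    let sleepers: SleeperList = Arc::new(Mutex::new(Vec::new()));

    // An idle scheduler: no work found, so register a handle and sleep.
    let list = sleepers.clone();
    let sleeper = thread::spawn(move || {
        let (wake_tx, wake_rx) = channel();
        list.lock().unwrap().push(wake_tx);
        wake_rx.recv().unwrap(); // blocks until somebody sends Wake
        println!("woke up; going to look for work");
    });

    // A busy scheduler pushing work: wake one sleeper once it has registered.
    loop {
        if let Some(handle) = sleepers.lock().unwrap().pop() {
            handle.send(()).unwrap(); // the Wake message
            break;
        }
        thread::yield_now(); // nobody asleep yet; keep checking
    }

    sleeper.join().unwrap();
}
```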
-        // Generate a SchedHandle and push it to the sleeper list so
-        // somebody can wake us up later.
-        if !sched.sleepy && !sched.no_sleep {
-            rtdebug!("scheduler has no work to do, going to sleep");
-            sched.sleepy = true;
-            let handle = sched.make_handle();
-            sched.sleeper_list.push(handle);
-            // Since we are sleeping, deactivate the idle callback.
-            sched.idle_callback.get_mut_ref().pause();
-        } else {
-            rtdebug!("not sleeping, already doing so or no_sleep set");
-            // We may not be sleeping, but we still need to deactivate
-            // the idle callback.
-            sched.idle_callback.get_mut_ref().pause();
-        }
-
-        // Finished a cycle without using the Scheduler. Place it back
-        // in TLS.
-        Local::put(sched);
-    }
-
-    // This function returns None if the scheduler is "used", or it
-    // returns the still-available scheduler. At this point all
-    // message-handling will count as a turn of work, and as a result
-    // return None.
-    fn interpret_message_queue(mut ~self, effort: EffortLevel) -> Option<~Scheduler> {
-
-        let msg = if effort == DontTryTooHard {
-            self.message_queue.casual_pop()
-        } else {
-            // When popping our message queue, we could see an "inconsistent"
-            // state which means that we *should* be able to pop data, but we
-            // are unable to at this time. Our options are:
-            //
-            //     1. Spin waiting for data
-            //     2. Ignore this and pretend we didn't find a message
-            //
-            // If we choose route 1, then if the pusher in question is currently
-            // pre-empted, we're going to take up our entire time slice just
-            // spinning on this queue. If we choose route 2, then the pusher in
-            // question is still guaranteed to make a send() on its async
-            // handle, so we are guaranteed to wake up and see its message at
-            // some point.
-            //
-            // I have chosen to take route #2.
-            match self.message_queue.pop() {
-                mpsc::Data(t) => Some(t),
-                mpsc::Empty | mpsc::Inconsistent => None
-            }
-        };
-
-        match msg {
-            Some(PinnedTask(task)) => {
-                let mut task = task;
-                task.give_home(Sched(self.make_handle()));
-                self.resume_task_immediately(task);
-                return None;
-            }
-            Some(TaskFromFriend(task)) => {
-                rtdebug!("got a task from a friend. lovely!");
-                self.process_task(task, Scheduler::resume_task_immediately_cl);
-                return None;
-            }
-            Some(RunOnce(task)) => {
-                // bypass the process_task logic to force running this task once
-                // on this home scheduler. This is often used for I/O (homing).
-                Scheduler::resume_task_immediately_cl(self, task);
-                return None;
-            }
-            Some(Wake) => {
-                self.sleepy = false;
-                Local::put(self);
-                return None;
-            }
-            Some(Shutdown) => {
-                rtdebug!("shutting down");
-                if self.sleepy {
-                    // There may be an outstanding handle on the
-                    // sleeper list. Pop them all to make sure that's
-                    // not the case.
-                    loop {
-                        match self.sleeper_list.pop() {
-                            Some(handle) => {
-                                let mut handle = handle;
-                                handle.send(Wake);
-                            }
-                            None => break
-                        }
-                    }
-                }
-                // No more sleeping. After there are no outstanding
-                // event loop references we will shut down.
-                self.no_sleep = true;
-                self.sleepy = false;
-                Local::put(self);
-                return None;
-            }
-            None => {
-                return Some(self);
-            }
-        }
-    }
-
-    fn do_work(mut ~self) -> Option<~Scheduler> {
-        rtdebug!("scheduler calling do work");
-        match self.find_work() {
-            Some(task) => {
-                rtdebug!("found some work! processing the task");
-                self.process_task(task, Scheduler::resume_task_immediately_cl);
-                return None;
-            }
-            None => {
-                rtdebug!("no work was found, returning the scheduler struct");
-                return Some(self);
-            }
-        }
-    }
-
-    // Workstealing: In this iteration of the runtime each scheduler
-    // thread has a distinct work queue. When no work is available
-    // locally, make a few attempts to steal work from the queues of
-    // other scheduler threads. If a few steals fail we end up in the
-    // old "no work" path which is fine.
-
-    // First step in the process is to find a task. This function does
-    // that by first checking the local queue, and if there is no work
-    // there, trying to steal from the remote work queues.
-    fn find_work(&mut self) -> Option<~Task> {
-        rtdebug!("scheduler looking for work");
-        if !self.steal_for_yield {
-            match self.work_queue.pop() {
-                Some(task) => {
-                    rtdebug!("found a task locally");
-                    return Some(task)
-                }
-                None => {
-                    rtdebug!("scheduler trying to steal");
-                    return self.try_steals();
-                }
-            }
-        } else {
-            // During execution of the last task, it performed a 'yield',
-            // so we're doing some work stealing in order to introduce some
-            // scheduling randomness. Otherwise we would just end up popping
-            // that same task again. This is pretty lame and is to work around
-            // the problem that work stealing is not designed for 'non-strict'
-            // (non-fork-join) task parallelism.
-            self.steal_for_yield = false;
-            match self.try_steals() {
-                Some(task) => {
-                    rtdebug!("stole a task after yielding");
-                    return Some(task);
-                }
-                None => {
-                    rtdebug!("did not steal a task after yielding");
-                    // Back to business
-                    return self.find_work();
-                }
-            }
-        }
-    }
-
-    // Try stealing from all queues the scheduler knows about. This
-    // naive implementation can steal from our own queue or from other
-    // special schedulers.
-    fn try_steals(&mut self) -> Option<~Task> {
-        let work_queues = &mut self.work_queues;
-        let len = work_queues.len();
-        let start_index = self.rng.gen_range(0, len);
-        for index in range(0, len).map(|i| (i + start_index) % len) {
-            match work_queues[index].steal() {
-                deque::Data(task) => {
-                    rtdebug!("found task by stealing");
-                    return Some(task)
-                }
-                _ => ()
-            }
-        };
-        rtdebug!("giving up on stealing");
-        return None;
-    }
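
The probe order in try_steals just above can be modeled without the runtime: start at a random queue, visit each queue exactly once, and wrap around. A dependency-free sketch, with plain VecDeques standing in for the work-stealing deques and a hand-rolled xorshift in place of the scheduler's rng (all names are illustrative):

    use std::collections::VecDeque;

    fn xorshift(state: &mut u32) -> u32 {
        // Same family of generator as the scheduler's XorShiftRng.
        *state ^= *state << 13;
        *state ^= *state >> 17;
        *state ^= *state << 5;
        *state
    }

    fn try_steals(queues: &mut [VecDeque<&'static str>], rng: &mut u32) -> Option<&'static str> {
        let len = queues.len(); // assumed non-empty, as in the scheduler
        let start_index = xorshift(rng) as usize % len;
        for index in (0..len).map(|i| (i + start_index) % len) {
            if let Some(task) = queues[index].pop_front() {
                return Some(task); // "found task by stealing"
            }
        }
        None // "giving up on stealing"
    }

    fn main() {
        let mut rng = 0x193a_6754u32; // any nonzero xorshift seed
        let mut queues = vec![VecDeque::new(), VecDeque::from(["t1"]), VecDeque::new()];
        assert_eq!(try_steals(&mut queues, &mut rng), Some("t1"));
    }
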
processing the task"); - self.process_task(task, Scheduler::resume_task_immediately_cl); - return None; - } - None => { - rtdebug!("no work was found, returning the scheduler struct"); - return Some(self); - } - } - } - - // Workstealing: In this iteration of the runtime each scheduler - // thread has a distinct work queue. When no work is available - // locally, make a few attempts to steal work from the queues of - // other scheduler threads. If a few steals fail we end up in the - // old "no work" path which is fine. - - // First step in the process is to find a task. This function does - // that by first checking the local queue, and if there is no work - // there, trying to steal from the remote work queues. - fn find_work(&mut self) -> Option<~Task> { - rtdebug!("scheduler looking for work"); - if !self.steal_for_yield { - match self.work_queue.pop() { - Some(task) => { - rtdebug!("found a task locally"); - return Some(task) - } - None => { - rtdebug!("scheduler trying to steal"); - return self.try_steals(); - } - } - } else { - // During execution of the last task, it performed a 'yield', - // so we're doing some work stealing in order to introduce some - // scheduling randomness. Otherwise we would just end up popping - // that same task again. This is pretty lame and is to work around - // the problem that work stealing is not designed for 'non-strict' - // (non-fork-join) task parallelism. - self.steal_for_yield = false; - match self.try_steals() { - Some(task) => { - rtdebug!("stole a task after yielding"); - return Some(task); - } - None => { - rtdebug!("did not steal a task after yielding"); - // Back to business - return self.find_work(); - } - } - } - } - - // Try stealing from all queues the scheduler knows about. This - // naive implementation can steal from our own queue or from other - // special schedulers. - fn try_steals(&mut self) -> Option<~Task> { - let work_queues = &mut self.work_queues; - let len = work_queues.len(); - let start_index = self.rng.gen_range(0, len); - for index in range(0, len).map(|i| (i + start_index) % len) { - match work_queues[index].steal() { - deque::Data(task) => { - rtdebug!("found task by stealing"); - return Some(task) - } - _ => () - } - }; - rtdebug!("giving up on stealing"); - return None; - } - - // * Task Routing Functions - Make sure tasks send up in the right - // place. - - fn process_task(mut ~self, mut task: ~Task, schedule_fn: SchedulingFn) { - rtdebug!("processing a task"); - - let home = task.take_unwrap_home(); - match home { - Sched(home_handle) => { - if home_handle.sched_id != self.sched_id() { - rtdebug!("sending task home"); - task.give_home(Sched(home_handle)); - Scheduler::send_task_home(task); - Local::put(self); - } else { - rtdebug!("running task here"); - task.give_home(Sched(home_handle)); - schedule_fn(self, task); - } - } - AnySched if self.run_anything => { - rtdebug!("running anysched task here"); - task.give_home(AnySched); - schedule_fn(self, task); - } - AnySched => { - rtdebug!("sending task to friend"); - task.give_home(AnySched); - self.send_to_friend(task); - Local::put(self); - } - } - } - - fn send_task_home(task: ~Task) { - let mut task = task; - let mut home = task.take_unwrap_home(); - match home { - Sched(ref mut home_handle) => { - home_handle.send(PinnedTask(task)); - } - AnySched => { - rtabort!("error: cannot send anysched task home"); - } - } - } - - /// Take a non-homed task we aren't allowed to run here and send - /// it to the designated friend scheduler to execute. 
- fn send_to_friend(&mut self, task: ~Task) { - rtdebug!("sending a task to friend"); - match self.friend_handle { - Some(ref mut handle) => { - handle.send(TaskFromFriend(task)); - } - None => { - rtabort!("tried to send task to a friend but scheduler has no friends"); - } - } - } - - /// Schedule a task to be executed later. - /// - /// Pushes the task onto the work stealing queue and tells the - /// event loop to run it later. Always use this instead of pushing - /// to the work queue directly. - pub fn enqueue_task(&mut self, task: ~Task) { - - // We push the task onto our local queue clone. - self.work_queue.push(task); - self.idle_callback.get_mut_ref().resume(); - - // We've made work available. Notify a - // sleeping scheduler. - - match self.sleeper_list.casual_pop() { - Some(handle) => { - let mut handle = handle; - handle.send(Wake) - } - None => { (/* pass */) } - }; - } - - /// As enqueue_task, but with the possibility for the blocked task to - /// already have been killed. - pub fn enqueue_blocked_task(&mut self, blocked_task: BlockedTask) { - blocked_task.wake().map(|task| self.enqueue_task(task)); - } - - // * Core Context Switching Functions - - // The primary function for changing contexts. In the current - // design the scheduler is just a slightly modified GreenTask, so - // all context swaps are from Task to Task. The only difference - // between the various cases is where the inputs come from, and - // what is done with the resulting task. That is specified by the - // cleanup function f, which takes the scheduler and the - // old task as inputs. - - pub fn change_task_context(mut ~self, - next_task: ~Task, - f: |&mut Scheduler, ~Task|) { - // The current task is grabbed from TLS, not taken as an input. - // Doing an unsafe_take to avoid writing back a null pointer - - // We're going to call `put` later to do that. - let current_task: ~Task = unsafe { Local::unsafe_take() }; - - // Check that the task is not in an atomically() section (e.g., - // holding a pthread mutex, which could deadlock the scheduler). - current_task.death.assert_may_sleep(); - - // These transmutes do something fishy with a closure. - let f_fake_region = unsafe { - transmute::<|&mut Scheduler, ~Task|, - |&mut Scheduler, ~Task|>(f) - }; - let f_opaque = ClosureConverter::from_fn(f_fake_region); - - // The current task is placed inside an enum with the cleanup - // function. This enum is then placed inside the scheduler. - self.cleanup_job = Some(CleanupJob::new(current_task, f_opaque)); - - // The scheduler is then placed inside the next task. - let mut next_task = next_task; - next_task.sched = Some(self); - - // However we still need an internal mutable pointer to the - // original task. The strategy here was "arrange memory, then - // get pointers", so we crawl back up the chain using - // transmute to eliminate borrowck errors. - unsafe { - - let sched: &mut Scheduler = - transmute_mut_region(*next_task.sched.get_mut_ref()); - - let current_task: &mut Task = match sched.cleanup_job { - Some(CleanupJob { task: ref task, .. }) => { - let task_ptr: *~Task = task; - transmute_mut_region(*transmute_mut_unsafe(task_ptr)) - } - None => { - rtabort!("no cleanup job"); - } - }; - - let (current_task_context, next_task_context) = - Scheduler::get_contexts(current_task, next_task); - - // Done with everything - put the next task in TLS. This - // works because due to transmute the borrow checker - // believes that we have no internal pointers to - // next_task. 
-            Local::put(next_task);
-
-            // The raw context swap operation. The next action taken
-            // will be running the cleanup job from the context of the
-            // next task.
-            Context::swap(current_task_context, next_task_context);
-        }
-
-        // When the context swaps back to this task we immediately
-        // run the cleanup job, as expected by the previously called
-        // swap_contexts function.
-        unsafe {
-            let task: *mut Task = Local::unsafe_borrow();
-            (*task).sched.get_mut_ref().run_cleanup_job();
-
-            // See the comments in switch_running_tasks_and_then for why a lock
-            // is acquired here. These are the resumption points and the "bounce"
-            // that it refers to.
-            (*task).nasty_deschedule_lock.lock();
-            (*task).nasty_deschedule_lock.unlock();
-        }
-    }
-
-    // Returns mutable references to both contexts involved in this
-    // swap. This is unsafe - we are getting mutable internal
-    // references to keep even when we don't own the tasks. It looks
-    // kinda safe because we are doing transmutes before passing in
-    // the arguments.
-    pub fn get_contexts<'a>(current_task: &mut Task, next_task: &mut Task) ->
-        (&'a mut Context, &'a mut Context) {
-        let current_task_context =
-            &mut current_task.coroutine.get_mut_ref().saved_context;
-        let next_task_context =
-            &mut next_task.coroutine.get_mut_ref().saved_context;
-        unsafe {
-            (transmute_mut_region(current_task_context),
-             transmute_mut_region(next_task_context))
-        }
-    }
-
-    // * Context Swapping Helpers - Here be ugliness!
-
-    pub fn resume_task_immediately(~self, task: ~Task) {
-        self.change_task_context(task, |sched, stask| {
-            sched.sched_task = Some(stask);
-        })
-    }
-
-    fn resume_task_immediately_cl(sched: ~Scheduler,
-                                  task: ~Task) {
-        sched.resume_task_immediately(task)
-    }
-
-
-    pub fn resume_blocked_task_immediately(~self, blocked_task: BlockedTask) {
-        match blocked_task.wake() {
-            Some(task) => { self.resume_task_immediately(task); }
-            None => Local::put(self)
-        };
-    }
-
-    /// Block a running task, context switch to the scheduler, then pass the
-    /// blocked task to a closure.
-    ///
-    /// # Safety note
-    ///
-    /// The closure here is a *stack* closure that lives in the
-    /// running task. It gets transmuted to the scheduler's lifetime
-    /// and called while the task is blocked.
-    ///
-    /// This passes a Scheduler pointer to the fn after the context switch
-    /// in order to prevent that fn from performing further scheduling operations.
-    /// Doing further scheduling could easily result in infinite recursion.
-    ///
-    /// Note that if the closure provided relinquishes ownership of the
-    /// BlockedTask, then it is possible for the task to resume execution before
-    /// the closure has finished executing. This would naturally introduce a
-    /// race if the closure and task shared portions of the environment.
-    ///
-    /// This situation is currently prevented, or in other words it is
-    /// guaranteed that this function will not return before the given closure
-    /// has returned.
-    pub fn deschedule_running_task_and_then(mut ~self,
-                                            f: |&mut Scheduler, BlockedTask|) {
-        // Trickier - we need to get the scheduler task out of self
-        // and use it as the destination.
-        let stask = self.sched_task.take_unwrap();
-        // Otherwise this is the same as below.
-        self.switch_running_tasks_and_then(stask, f);
-    }
-
-    pub fn switch_running_tasks_and_then(~self, next_task: ~Task,
-                                         f: |&mut Scheduler, BlockedTask|) {
-        // And here comes one of the sad moments in which a lock is used in a
-        // core portion of the rust runtime.
As always, this is highly - // undesirable, so there's a good reason behind it. - // - // There is an excellent outline of the problem in issue #8132, and it's - // summarized in that `f` is executed on a sched task, but its - // environment is on the previous task. If `f` relinquishes ownership of - // the BlockedTask, then it may introduce a race where `f` is using the - // environment as well as the code after the 'deschedule' block. - // - // The solution we have chosen to adopt for now is to acquire a - // task-local lock around this block. The resumption of the task in - // context switching will bounce on the lock, thereby waiting for this - // block to finish, eliminating the race mentioned above. - // - // To actually maintain a handle to the lock, we use an unsafe pointer - // to it, but we're guaranteed that the task won't exit until we've - // unlocked the lock so there's no worry of this memory going away. - self.change_task_context(next_task, |sched, mut task| { - let lock: *mut Mutex = &mut task.nasty_deschedule_lock; - unsafe { (*lock).lock() } - f(sched, BlockedTask::block(task)); - unsafe { (*lock).unlock() } - }) - } - - fn switch_task(sched: ~Scheduler, task: ~Task) { - sched.switch_running_tasks_and_then(task, |sched, last_task| { - sched.enqueue_blocked_task(last_task); - }); - } - - // * Task Context Helpers - - /// Called by a running task to end execution, after which it will - /// be recycled by the scheduler for reuse in a new task. - pub fn terminate_current_task(mut ~self) { - // Similar to deschedule running task and then, but cannot go through - // the task-blocking path. The task is already dying. - let stask = self.sched_task.take_unwrap(); - self.change_task_context(stask, |sched, mut dead_task| { - let coroutine = dead_task.coroutine.take_unwrap(); - coroutine.recycle(&mut sched.stack_pool); - }) - } - - pub fn run_task(task: ~Task) { - let sched: ~Scheduler = Local::take(); - sched.process_task(task, Scheduler::switch_task); - } - - pub fn run_task_later(next_task: ~Task) { - let mut sched = Local::borrow(None::<Scheduler>); - sched.get().enqueue_task(next_task); - } - - /// Yield control to the scheduler, executing another task. This is guaranteed - /// to introduce some amount of randomness to the scheduler. Currently the - /// randomness is a result of performing a round of work stealing (which - /// may end up stealing from the current scheduler). - pub fn yield_now(mut ~self) { - self.yield_check_count = reset_yield_check(&mut self.rng); - // Tell the scheduler to start stealing on the next iteration - self.steal_for_yield = true; - self.deschedule_running_task_and_then(|sched, task| { - sched.enqueue_blocked_task(task); - }) - } - - pub fn maybe_yield(mut ~self) { - // The number of times to do the yield check before yielding, chosen arbitrarily. 
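
For reference, the countdown that yield_now resets above and maybe_yield (whose body continues just below) decrements can be modeled in isolation; a minimal sketch, with a fixed value standing in for the scheduler's rng:

    const MAX_YIELD_CHECKS: u32 = 20_000;

    struct YieldCheck {
        count: u32,
    }

    impl YieldCheck {
        fn reset(rand: u32) -> YieldCheck {
            // Mirrors reset_yield_check: a value in [1, MAX_YIELD_CHECKS].
            YieldCheck { count: rand % MAX_YIELD_CHECKS + 1 }
        }

        // Returns true when the caller should actually yield.
        fn maybe_yield(&mut self) -> bool {
            assert!(self.count > 0);
            self.count -= 1;
            self.count == 0
        }
    }

    fn main() {
        let mut yc = YieldCheck::reset(41); // the scheduler would use its rng here
        let mut checks = 0u32;
        while !yc.maybe_yield() {
            checks += 1;
        }
        println!("yielded on check #{}", checks + 1); // check #42 for this seed
    }
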
- rtassert!(self.yield_check_count > 0); - self.yield_check_count -= 1; - if self.yield_check_count == 0 { - self.yield_now(); - } else { - Local::put(self); - } - } - - - // * Utility Functions - - pub fn sched_id(&self) -> uint { to_uint(self) } - - pub fn run_cleanup_job(&mut self) { - let cleanup_job = self.cleanup_job.take_unwrap(); - cleanup_job.run(self); - } - - pub fn make_handle(&mut self) -> SchedHandle { - let remote = self.event_loop.remote_callback(~SchedRunner as ~Callback); - - return SchedHandle { - remote: remote, - queue: self.message_producer.clone(), - sched_id: self.sched_id() - }; - } -} - -// Supporting types - -type SchedulingFn = extern "Rust" fn (~Scheduler, ~Task); - -pub enum SchedMessage { - Wake, - Shutdown, - PinnedTask(~Task), - TaskFromFriend(~Task), - RunOnce(~Task), -} - -pub struct SchedHandle { - priv remote: ~RemoteCallback, - priv queue: mpsc::Producer<SchedMessage, ()>, - sched_id: uint -} - -impl SchedHandle { - pub fn send(&mut self, msg: SchedMessage) { - self.queue.push(msg); - self.remote.fire(); - } -} - -struct SchedRunner; - -impl Callback for SchedRunner { - fn call(&mut self) { - Scheduler::run_sched_once(); - } -} - -struct CleanupJob { - task: ~Task, - f: UnsafeTaskReceiver -} - -impl CleanupJob { - pub fn new(task: ~Task, f: UnsafeTaskReceiver) -> CleanupJob { - CleanupJob { - task: task, - f: f - } - } - - pub fn run(self, sched: &mut Scheduler) { - let CleanupJob { task: task, f: f } = self; - f.to_fn()(sched, task) - } -} - -// XXX: Some hacks to put a || closure in Scheduler without borrowck -// complaining -type UnsafeTaskReceiver = raw::Closure; -trait ClosureConverter { - fn from_fn(|&mut Scheduler, ~Task|) -> Self; - fn to_fn(self) -> |&mut Scheduler, ~Task|; -} -impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: |&mut Scheduler, ~Task|) -> UnsafeTaskReceiver { - unsafe { transmute(f) } - } - fn to_fn(self) -> |&mut Scheduler, ~Task| { unsafe { transmute(self) } } -} - -// On unix, we read randomness straight from /dev/urandom, but the -// default constructor of an XorShiftRng does this via io::fs, which -// relies on the scheduler existing, so we have to manually load -// randomness. Windows has its own C API for this, so we don't need to -// worry there. -#[cfg(windows)] -fn new_sched_rng() -> XorShiftRng { - XorShiftRng::new() -} -#[cfg(unix)] -fn new_sched_rng() -> XorShiftRng { - use libc; - use mem; - use c_str::ToCStr; - use vec::MutableVector; - use iter::Iterator; - use rand::SeedableRng; - - let fd = "/dev/urandom".with_c_str(|name| { - unsafe { libc::open(name, libc::O_RDONLY, 0) } - }); - if fd == -1 { - rtabort!("could not open /dev/urandom for reading.") - } - - let mut seeds = [0u32, .. 
4]; - let size = mem::size_of_val(&seeds); - loop { - let nbytes = unsafe { - libc::read(fd, - seeds.as_mut_ptr() as *mut libc::c_void, - size as libc::size_t) - }; - rtassert!(nbytes as uint == size); - - if !seeds.iter().all(|x| *x == 0) { - break; - } - } - - unsafe {libc::close(fd);} - - SeedableRng::from_seed(seeds) -} - -#[cfg(test)] -mod test { - use prelude::*; - - use borrow::to_uint; - use rt::deque::BufferPool; - use rt::basic; - use rt::sched::{Scheduler}; - use rt::task::{Task, Sched}; - use rt::test::*; - use rt::thread::Thread; - use rt::util; - use task::TaskResult; - use unstable::run_in_bare_thread; - - #[test] - fn trivial_run_in_newsched_task_test() { - let mut task_ran = false; - let task_ran_ptr: *mut bool = &mut task_ran; - do run_in_newsched_task || { - unsafe { *task_ran_ptr = true }; - rtdebug!("executed from the new scheduler") - } - assert!(task_ran); - } - - #[test] - fn multiple_task_test() { - let total = 10; - let mut task_run_count = 0; - let task_run_count_ptr: *mut uint = &mut task_run_count; - do run_in_newsched_task || { - for _ in range(0u, total) { - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1}; - } - } - } - assert!(task_run_count == total); - } - - #[test] - fn multiple_task_nested_test() { - let mut task_run_count = 0; - let task_run_count_ptr: *mut uint = &mut task_run_count; - do run_in_newsched_task || { - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - } - } - } - } - assert!(task_run_count == 3); - } - - // Confirm that a sched_id actually is the uint form of the - // pointer to the scheduler struct. - #[test] - fn simple_sched_id_test() { - do run_in_bare_thread { - let sched = ~new_test_uv_sched(); - assert!(to_uint(sched) == sched.sched_id()); - } - } - - // Compare two scheduler ids that are different, this should never - // fail but may catch a mistake someday. - #[test] - fn compare_sched_id_test() { - do run_in_bare_thread { - let sched_one = ~new_test_uv_sched(); - let sched_two = ~new_test_uv_sched(); - assert!(sched_one.sched_id() != sched_two.sched_id()); - } - } - - - // A very simple test that confirms that a task executing on the - // home scheduler notices that it is home. - #[test] - fn test_home_sched() { - do run_in_bare_thread { - let mut task_ran = false; - let task_ran_ptr: *mut bool = &mut task_ran; - - let mut sched = ~new_test_uv_sched(); - let sched_handle = sched.make_handle(); - - let mut task = ~do Task::new_root_homed(&mut sched.stack_pool, None, - Sched(sched_handle)) { - unsafe { *task_ran_ptr = true }; - assert!(Task::on_appropriate_sched()); - }; - - let on_exit: proc(TaskResult) = proc(exit_status) { - rtassert!(exit_status.is_ok()) - }; - task.death.on_exit = Some(on_exit); - - sched.bootstrap(task); - } - } - - // An advanced test that checks all four possible states that a - // (task,sched) can be in regarding homes. 
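
Stepping back to new_sched_rng above (the manual /dev/urandom read was needed because io::fs itself required a scheduler): the same seeding logic maps directly onto today's std I/O. A sketch, including the retry that rejects an all-zero seed, which a xorshift generator can never leave (the homing tests resume below):

    use std::fs::File;
    use std::io::Read;

    fn seed_from_urandom() -> std::io::Result<[u32; 4]> {
        let mut f = File::open("/dev/urandom")?;
        loop {
            let mut bytes = [0u8; 16];
            f.read_exact(&mut bytes)?;
            let mut seeds = [0u32; 4];
            for (seed, chunk) in seeds.iter_mut().zip(bytes.chunks_exact(4)) {
                *seed = u32::from_ne_bytes([chunk[0], chunk[1], chunk[2], chunk[3]]);
            }
            // An all-zero state would make xorshift emit only zeros forever.
            if seeds.iter().any(|&s| s != 0) {
                return Ok(seeds);
            }
        }
    }

    fn main() -> std::io::Result<()> {
        println!("seed: {:?}", seed_from_urandom()?);
        Ok(())
    }
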
- - #[test] - fn test_schedule_home_states() { - use rt::sleeper_list::SleeperList; - use rt::sched::Shutdown; - use borrow; - - do run_in_bare_thread { - - let sleepers = SleeperList::new(); - let mut pool = BufferPool::new(); - let (normal_worker, normal_stealer) = pool.deque(); - let (special_worker, special_stealer) = pool.deque(); - let queues = ~[normal_stealer, special_stealer]; - - // Our normal scheduler - let mut normal_sched = ~Scheduler::new( - basic::event_loop(), - normal_worker, - queues.clone(), - sleepers.clone()); - - let normal_handle = normal_sched.make_handle(); - - let friend_handle = normal_sched.make_handle(); - - // Our special scheduler - let mut special_sched = ~Scheduler::new_special( - basic::event_loop(), - special_worker, - queues.clone(), - sleepers.clone(), - false, - Some(friend_handle)); - - let special_handle = special_sched.make_handle(); - - let t1_handle = special_sched.make_handle(); - let t4_handle = special_sched.make_handle(); - - // Four test tasks: - // 1) task is home on special - // 2) task not homed, sched doesn't care - // 3) task not homed, sched requeues - // 4) task not home, send home - - let task1 = ~do Task::new_root_homed(&mut special_sched.stack_pool, None, - Sched(t1_handle)) || { - rtassert!(Task::on_appropriate_sched()); - }; - rtdebug!("task1 id: **{}**", borrow::to_uint(task1)); - - let task2 = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtassert!(Task::on_appropriate_sched()); - }; - - let task3 = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtassert!(Task::on_appropriate_sched()); - }; - - let task4 = ~do Task::new_root_homed(&mut special_sched.stack_pool, None, - Sched(t4_handle)) { - rtassert!(Task::on_appropriate_sched()); - }; - rtdebug!("task4 id: **{}**", borrow::to_uint(task4)); - - // Signal from the special task that we are done. 
- let (port, chan) = Chan::<()>::new(); - - let normal_task = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtdebug!("*about to submit task2*"); - Scheduler::run_task(task2); - rtdebug!("*about to submit task4*"); - Scheduler::run_task(task4); - rtdebug!("*normal_task done*"); - port.recv(); - let mut nh = normal_handle; - nh.send(Shutdown); - let mut sh = special_handle; - sh.send(Shutdown); - }; - - rtdebug!("normal task: {}", borrow::to_uint(normal_task)); - - let special_task = ~do Task::new_root(&mut special_sched.stack_pool, None) { - rtdebug!("*about to submit task1*"); - Scheduler::run_task(task1); - rtdebug!("*about to submit task3*"); - Scheduler::run_task(task3); - rtdebug!("*done with special_task*"); - chan.send(()); - }; - - rtdebug!("special task: {}", borrow::to_uint(special_task)); - - let normal_sched = normal_sched; - let normal_thread = do Thread::start { - normal_sched.bootstrap(normal_task); - rtdebug!("finished with normal_thread"); - }; - - let special_sched = special_sched; - let special_thread = do Thread::start { - special_sched.bootstrap(special_task); - rtdebug!("finished with special_sched"); - }; - - normal_thread.join(); - special_thread.join(); - } - } - - #[test] - fn test_stress_schedule_task_states() { - if util::limit_thread_creation_due_to_osx_and_valgrind() { return; } - let n = stress_factor() * 120; - for _ in range(0, n as int) { - test_schedule_home_states(); - } - } - - #[test] - fn test_io_callback() { - use io::timer; - - // This is a regression test that when there are no schedulable tasks - // in the work queue, but we are performing I/O, that once we do put - // something in the work queue again the scheduler picks it up and doesn't - // exit before emptying the work queue - do run_in_uv_task { - do spawntask { - timer::sleep(10); - } - } - } - - #[test] - fn handle() { - do run_in_bare_thread { - let (port, chan) = Chan::new(); - - let thread_one = do Thread::start { - let chan = chan; - do run_in_newsched_task_core { - chan.send(()); - } - }; - - let thread_two = do Thread::start { - let port = port; - do run_in_newsched_task_core { - port.recv(); - } - }; - - thread_two.join(); - thread_one.join(); - } - } - - // A regression test that the final message is always handled. - // Used to deadlock because Shutdown was never recvd. 
- #[test] - fn no_missed_messages() { - use rt::sleeper_list::SleeperList; - use rt::stack::StackPool; - use rt::sched::{Shutdown, TaskFromFriend}; - - do run_in_bare_thread { - stress_factor().times(|| { - let sleepers = SleeperList::new(); - let mut pool = BufferPool::new(); - let (worker, stealer) = pool.deque(); - - let mut sched = ~Scheduler::new( - basic::event_loop(), - worker, - ~[stealer], - sleepers.clone()); - - let mut handle = sched.make_handle(); - - let sched = sched; - let thread = do Thread::start { - let mut sched = sched; - let bootstrap_task = - ~Task::new_root(&mut sched.stack_pool, - None, - proc()()); - sched.bootstrap(bootstrap_task); - }; - - let mut stack_pool = StackPool::new(); - let task = ~Task::new_root(&mut stack_pool, None, proc()()); - handle.send(TaskFromFriend(task)); - - handle.send(Shutdown); - drop(handle); - - thread.join(); - }) - } - } - - #[test] - fn multithreading() { - use num::Times; - use vec::OwnedVector; - use container::Container; - - do run_in_mt_newsched_task { - let mut ports = ~[]; - 10.times(|| { - let (port, chan) = Chan::new(); - do spawntask_later { - chan.send(()); - } - ports.push(port); - }); - - while !ports.is_empty() { - ports.pop().recv(); - } - } - } - - #[test] - fn thread_ring() { - do run_in_mt_newsched_task { - let (end_port, end_chan) = Chan::new(); - - let n_tasks = 10; - let token = 2000; - - let (mut p, ch1) = Chan::new(); - ch1.send((token, end_chan)); - let mut i = 2; - while i <= n_tasks { - let (next_p, ch) = Chan::new(); - let imm_i = i; - let imm_p = p; - do spawntask_random { - roundtrip(imm_i, n_tasks, &imm_p, &ch); - }; - p = next_p; - i += 1; - } - let p = p; - do spawntask_random { - roundtrip(1, n_tasks, &p, &ch1); - } - - end_port.recv(); - } - - fn roundtrip(id: int, n_tasks: int, - p: &Port<(int, Chan<()>)>, - ch: &Chan<(int, Chan<()>)>) { - while (true) { - match p.recv() { - (1, end_chan) => { - debug!("{}\n", id); - end_chan.send(()); - return; - } - (token, end_chan) => { - debug!("thread: {} got token: {}", id, token); - ch.send((token - 1, end_chan)); - if token <= n_tasks { - return; - } - } - } - } - } - } - - #[test] - fn start_closure_dtor() { - use ops::Drop; - - // Regression test that the `start` task entrypoint can - // contain dtors that use task resources - do run_in_newsched_task { - struct S { field: () } - - impl Drop for S { - fn drop(&mut self) { - let _foo = @0; - } - } - - let s = S { field: () }; - - do spawntask { - let _ss = &s; - } - } - } - - // FIXME: #9407: xfail-test - #[ignore] - #[test] - fn dont_starve_1() { - stress_factor().times(|| { - do run_in_mt_newsched_task { - let (port, chan) = Chan::new(); - - // This task should not be able to starve the sender; - // The sender should get stolen to another thread. - do spawntask { - while port.try_recv().is_none() { } - } - - chan.send(()); - } - }) - } - - #[test] - fn dont_starve_2() { - stress_factor().times(|| { - do run_in_newsched_task { - let (port, chan) = Chan::new(); - let (_port2, chan2) = Chan::new(); - - // This task should not be able to starve the other task. - // The sends should eventually yield. - do spawntask { - while port.try_recv().is_none() { - chan2.send(()); - } - } - - chan.send(()); - } - }) - } - - // Regression test for a logic bug that would cause single-threaded schedulers - // to sleep forever after yielding and stealing another task. 
-    #[test]
-    fn single_threaded_yield() {
-        use task::{spawn, spawn_sched, SingleThreaded, deschedule};
-        use num::Times;
-
-        do spawn_sched(SingleThreaded) {
-            5.times(|| { deschedule(); })
-        }
-        do spawn { }
-        do spawn { }
-    }
-}
diff --git a/src/libstd/rt/sleeper_list.rs b/src/libstd/rt/sleeper_list.rs
deleted file mode 100644
index 39c7431837f..00000000000
--- a/src/libstd/rt/sleeper_list.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! Maintains a shared list of sleeping schedulers. Schedulers
-//! use this to wake each other up.
-
-use rt::sched::SchedHandle;
-use rt::mpmc_bounded_queue::Queue;
-use option::*;
-use clone::Clone;
-
-pub struct SleeperList {
-    priv q: Queue<SchedHandle>,
-}
-
-impl SleeperList {
-    pub fn new() -> SleeperList {
-        SleeperList{q: Queue::with_capacity(8*1024)}
-    }
-
-    pub fn push(&mut self, value: SchedHandle) {
-        assert!(self.q.push(value))
-    }
-
-    pub fn pop(&mut self) -> Option<SchedHandle> {
-        self.q.pop()
-    }
-
-    pub fn casual_pop(&mut self) -> Option<SchedHandle> {
-        self.q.pop()
-    }
-}
-
-impl Clone for SleeperList {
-    fn clone(&self) -> SleeperList {
-        SleeperList {
-            q: self.q.clone()
-        }
-    }
-}
diff --git a/src/libstd/rt/stack.rs b/src/libstd/rt/stack.rs
deleted file mode 100644
index 44b60e955d2..00000000000
--- a/src/libstd/rt/stack.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-use container::Container;
-use ptr::RawPtr;
-use vec;
-use ops::Drop;
-use libc::{c_uint, uintptr_t};
-
-pub struct StackSegment {
-    priv buf: ~[u8],
-    priv valgrind_id: c_uint
-}
-
-impl StackSegment {
-    pub fn new(size: uint) -> StackSegment {
-        unsafe {
-            // Create a block of uninitialized values
-            let mut stack = vec::with_capacity(size);
-            stack.set_len(size);
-
-            let mut stk = StackSegment {
-                buf: stack,
-                valgrind_id: 0
-            };
-
-            // XXX: Using the FFI to call a C macro. Slow
-            stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end());
-            return stk;
-        }
-    }
-
-    /// Point to the low end of the allocated stack
-    pub fn start(&self) -> *uint {
-        self.buf.as_ptr() as *uint
-    }
-
-    /// Point one word beyond the high end of the allocated stack
-    pub fn end(&self) -> *uint {
-        unsafe {
-            self.buf.as_ptr().offset(self.buf.len() as int) as *uint
-        }
-    }
-}
-
-impl Drop for StackSegment {
-    fn drop(&mut self) {
-        unsafe {
-            // XXX: Using the FFI to call a C macro.
Slow - rust_valgrind_stack_deregister(self.valgrind_id); - } - } -} - -pub struct StackPool(()); - -impl StackPool { - pub fn new() -> StackPool { StackPool(()) } - - fn take_segment(&self, min_size: uint) -> StackSegment { - StackSegment::new(min_size) - } - - fn give_segment(&self, _stack: StackSegment) { - } -} - -extern { - fn rust_valgrind_stack_register(start: *uintptr_t, end: *uintptr_t) -> c_uint; - fn rust_valgrind_stack_deregister(id: c_uint); -} diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 30e05e9091f..7602d7b0564 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -13,29 +13,31 @@ //! local storage, and logging. Even a 'freestanding' Rust would likely want //! to implement this. -use super::local_heap::LocalHeap; - -use prelude::*; - +use any::AnyOwnExt; use borrow; use cleanup; use io::Writer; use libc::{c_char, size_t}; use local_data; +use ops::Drop; use option::{Option, Some, None}; +use prelude::drop; +use result::{Result, Ok, Err}; +use rt::Runtime; use rt::borrowck::BorrowRecord; use rt::borrowck; -use rt::context::Context; -use rt::env; -use rt::kill::Death; use rt::local::Local; +use rt::local_heap::LocalHeap; use rt::logging::StdErrLogger; -use rt::sched::{Scheduler, SchedHandle}; -use rt::stack::{StackSegment, StackPool}; +use rt::rtio::LocalIo; use rt::unwind::Unwinder; use send_str::SendStr; +use sync::arc::UnsafeArc; +use sync::atomics::{AtomicUint, SeqCst}; +use task::{TaskResult, TaskOpts}; use unstable::finally::Finally; -use unstable::mutex::Mutex; + +#[cfg(stage0)] pub use rt::unwind::begin_unwind; // The Task struct represents all state associated with a rust // task. There are at this point two primary "subtypes" of task, @@ -45,201 +47,89 @@ use unstable::mutex::Mutex; pub struct Task { heap: LocalHeap, - priv gc: GarbageCollector, + gc: GarbageCollector, storage: LocalStorage, - logger: Option<StdErrLogger>, unwinder: Unwinder, death: Death, destroyed: bool, name: Option<SendStr>, - coroutine: Option<Coroutine>, - sched: Option<~Scheduler>, - task_type: TaskType, // Dynamic borrowck debugging info borrow_list: Option<~[BorrowRecord]>, + + logger: Option<StdErrLogger>, stdout_handle: Option<~Writer>, - // See the comments in the scheduler about why this is necessary - nasty_deschedule_lock: Mutex, + priv imp: Option<~Runtime>, } -pub enum TaskType { - GreenTask(Option<SchedHome>), - SchedTask -} +pub struct GarbageCollector; +pub struct LocalStorage(Option<local_data::Map>); -/// A coroutine is nothing more than a (register context, stack) pair. -pub struct Coroutine { - /// The segment of stack on which the task is currently running or - /// if the task is blocked, on which the task will resume - /// execution. - /// - /// Servo needs this to be public in order to tell SpiderMonkey - /// about the stack bounds. - current_stack_segment: StackSegment, - /// Always valid if the task is alive and not running. - saved_context: Context +/// A handle to a blocked task. Usually this means having the ~Task pointer by +/// ownership, but if the task is killable, a killer can steal it at any time. +pub enum BlockedTask { + Owned(~Task), + Shared(UnsafeArc<AtomicUint>), } -/// Some tasks have a dedicated home scheduler that they must run on. -pub enum SchedHome { - AnySched, - Sched(SchedHandle) +/// Per-task state related to task death, killing, failure, etc. +pub struct Death { + // Action to be done with the exit code. If set, also makes the task wait + // until all its watched children exit before collecting the status. 
+ on_exit: Option<proc(TaskResult)>, } -pub struct GarbageCollector; -pub struct LocalStorage(Option<local_data::Map>); +pub struct BlockedTaskIterator { + priv inner: UnsafeArc<AtomicUint>, +} impl Task { - - // A helper to build a new task using the dynamically found - // scheduler and task. Only works in GreenTask context. - pub fn build_homed_child(stack_size: Option<uint>, - f: proc(), - home: SchedHome) - -> ~Task { - let mut running_task = Local::borrow(None::<Task>); - let mut sched = running_task.get().sched.take_unwrap(); - let new_task = ~running_task.get() - .new_child_homed(&mut sched.stack_pool, - stack_size, - home, - f); - running_task.get().sched = Some(sched); - new_task - } - - pub fn build_child(stack_size: Option<uint>, f: proc()) -> ~Task { - Task::build_homed_child(stack_size, f, AnySched) - } - - pub fn build_homed_root(stack_size: Option<uint>, - f: proc(), - home: SchedHome) - -> ~Task { - let mut running_task = Local::borrow(None::<Task>); - let mut sched = running_task.get().sched.take_unwrap(); - let new_task = ~Task::new_root_homed(&mut sched.stack_pool, - stack_size, - home, - f); - running_task.get().sched = Some(sched); - new_task - } - - pub fn build_root(stack_size: Option<uint>, f: proc()) -> ~Task { - Task::build_homed_root(stack_size, f, AnySched) - } - - pub fn new_sched_task() -> Task { - Task { - heap: LocalHeap::new(), - gc: GarbageCollector, - storage: LocalStorage(None), - logger: None, - unwinder: Unwinder { unwinding: false, cause: None }, - death: Death::new(), - destroyed: false, - coroutine: Some(Coroutine::empty()), - name: None, - sched: None, - task_type: SchedTask, - borrow_list: None, - stdout_handle: None, - nasty_deschedule_lock: unsafe { Mutex::new() }, - } - } - - pub fn new_root(stack_pool: &mut StackPool, - stack_size: Option<uint>, - start: proc()) -> Task { - Task::new_root_homed(stack_pool, stack_size, AnySched, start) - } - - pub fn new_child(&mut self, - stack_pool: &mut StackPool, - stack_size: Option<uint>, - start: proc()) -> Task { - self.new_child_homed(stack_pool, stack_size, AnySched, start) - } - - pub fn new_root_homed(stack_pool: &mut StackPool, - stack_size: Option<uint>, - home: SchedHome, - start: proc()) -> Task { + pub fn new() -> Task { Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: None, - unwinder: Unwinder { unwinding: false, cause: None }, + unwinder: Unwinder::new(), death: Death::new(), destroyed: false, name: None, - coroutine: Some(Coroutine::new(stack_pool, stack_size, start)), - sched: None, - task_type: GreenTask(Some(home)), borrow_list: None, - stdout_handle: None, - nasty_deschedule_lock: unsafe { Mutex::new() }, - } - } - - pub fn new_child_homed(&mut self, - stack_pool: &mut StackPool, - stack_size: Option<uint>, - home: SchedHome, - start: proc()) -> Task { - Task { - heap: LocalHeap::new(), - gc: GarbageCollector, - storage: LocalStorage(None), logger: None, - unwinder: Unwinder { unwinding: false, cause: None }, - death: Death::new(), - destroyed: false, - name: None, - coroutine: Some(Coroutine::new(stack_pool, stack_size, start)), - sched: None, - task_type: GreenTask(Some(home)), - borrow_list: None, stdout_handle: None, - nasty_deschedule_lock: unsafe { Mutex::new() }, + imp: None, } } - pub fn give_home(&mut self, new_home: SchedHome) { - match self.task_type { - GreenTask(ref mut home) => { - *home = Some(new_home); - } - SchedTask => { - rtabort!("type error: used SchedTask as GreenTask"); - } - } - } - - pub fn take_unwrap_home(&mut self) 
-> SchedHome { - match self.task_type { - GreenTask(ref mut home) => { - let out = home.take_unwrap(); - return out; - } - SchedTask => { - rtabort!("type error: used SchedTask as GreenTask"); - } - } - } - - pub fn run(&mut self, f: ||) { - rtdebug!("run called on task: {}", borrow::to_uint(self)); + /// Executes the given closure as if it's running inside this task. The task + /// is consumed upon entry, and the destroyed task is returned from this + /// function in order for the caller to free. This function is guaranteed to + /// not unwind because the closure specified is run inside of a `rust_try` + /// block. (this is the only try/catch block in the world). + /// + /// This function is *not* meant to be abused as a "try/catch" block. This + /// is meant to be used at the absolute boundaries of a task's lifetime, and + /// only for that purpose. + pub fn run(~self, f: ||) -> ~Task { + // Need to put ourselves into TLS, but also need access to the unwinder. + // Unsafely get a handle to the task so we can continue to use it after + // putting it in tls (so we can invoke the unwinder). + let handle: *mut Task = unsafe { + *cast::transmute::<&~Task, &*mut Task>(&self) + }; + Local::put(self); // The only try/catch block in the world. Attempt to run the task's // client-specified code and catch any failures. - self.unwinder.try(|| { + let try_block = || { // Run the task main function, then do some cleanup. f.finally(|| { + fn flush(w: Option<~Writer>) { + match w { + Some(mut w) => { w.flush(); } + None => {} + } + } // First, destroy task-local storage. This may run user dtors. // @@ -260,7 +150,10 @@ impl Task { // TLS, or possibly some destructors for those objects being // annihilated invoke TLS. Sadly these two operations seemed to // be intertwined, and miraculously work for now... - self.storage.take(); + let mut task = Local::borrow(None::<Task>); + let storage = task.get().storage.take(); + drop(task); + drop(storage); // Destroy remaining boxes. Also may run user dtors. unsafe { cleanup::annihilate(); } @@ -268,77 +161,112 @@ impl Task { // Finally flush and destroy any output handles which the task // owns. There are no boxes here, and no user destructors should // run after this any more. - match self.stdout_handle.take() { - Some(handle) => { - let mut handle = handle; - handle.flush(); - } - None => {} - } - self.logger.take(); + let mut task = Local::borrow(None::<Task>); + let stdout = task.get().stdout_handle.take(); + let logger = task.get().logger.take(); + drop(task); + + flush(stdout); + drop(logger); }) - }); + }; + + unsafe { (*handle).unwinder.try(try_block); } // Cleanup the dynamic borrowck debugging info borrowck::clear_task_borrow_list(); - self.death.collect_failure(self.unwinder.result()); - self.destroyed = true; + let mut me: ~Task = Local::take(); + me.death.collect_failure(me.unwinder.result()); + me.destroyed = true; + return me; } - // New utility functions for homes. + /// Inserts a runtime object into this task, transferring ownership to the + /// task. It is illegal to replace a previous runtime object in this task + /// with this argument. + pub fn put_runtime(&mut self, ops: ~Runtime) { + assert!(self.imp.is_none()); + self.imp = Some(ops); + } - pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool { - match self.task_type { - GreenTask(Some(AnySched)) => { false } - GreenTask(Some(Sched(SchedHandle { sched_id: ref id, .. 
-            }))) => {
-                *id == sched.sched_id()
-            }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                // Awe yea
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
+    /// Attempts to extract the runtime as a specific type. If the runtime does
+    /// not have the provided type, then the runtime is not removed. If the
+    /// runtime does have the specified type, then it is removed and returned
+    /// (transfer of ownership).
+    ///
+    /// It is recommended to only use this method when *absolutely necessary*.
+    /// This function may not be available in the future.
+    pub fn maybe_take_runtime<T: 'static>(&mut self) -> Option<~T> {
+        // This is a terrible, terrible function. The general idea here is to
+        // take the runtime, cast it to ~Any, check if it has the right type,
+        // and then re-cast it back if necessary. The method of doing this is
+        // pretty sketchy and involves shuffling vtables of trait objects
+        // around, but it gets the job done.
+        //
+        // XXX: This function is a serious code smell and should be avoided at
+        //      all costs. I have yet to think of a method to avoid this
+        //      function, and I would be saddened if more usage of the function
+        //      crops up.
+        unsafe {
+            let imp = self.imp.take_unwrap();
+            let &(vtable, _): &(uint, uint) = cast::transmute(&imp);
+            match imp.wrap().move::<T>() {
+                Ok(t) => Some(t),
+                Err(t) => {
+                    let (_, obj): (uint, uint) = cast::transmute(t);
+                    let obj: ~Runtime = cast::transmute((vtable, obj));
+                    self.put_runtime(obj);
+                    None
+                }
+            }
         }
     }

-    pub fn homed(&self) -> bool {
-        match self.task_type {
-            GreenTask(Some(AnySched)) => { false }
-            GreenTask(Some(Sched(SchedHandle { .. }))) => { true }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
-            }
-        }
+    /// Spawns a sibling to this task. The newly spawned task is configured with
+    /// the `opts` structure and will run `f` as the body of its code.
+    pub fn spawn_sibling(mut ~self, opts: TaskOpts, f: proc()) {
+        let ops = self.imp.take_unwrap();
+        ops.spawn_sibling(self, opts, f)
     }

-    // Grab both the scheduler and the task from TLS and check if the
-    // task is executing on an appropriate scheduler.
-    pub fn on_appropriate_sched() -> bool {
-        let mut task = Local::borrow(None::<Task>);
-        let sched_id = task.get().sched.get_ref().sched_id();
-        let sched_run_anything = task.get().sched.get_ref().run_anything;
-        match task.get().task_type {
-            GreenTask(Some(AnySched)) => {
-                rtdebug!("anysched task in sched check ****");
-                sched_run_anything
-            }
-            GreenTask(Some(Sched(SchedHandle { sched_id: ref id, ..}))) => {
-                rtdebug!("homed task in sched check ****");
-                *id == sched_id
-            }
-            GreenTask(None) => {
-                rtabort!("task without home");
-            }
-            SchedTask => {
-                rtabort!("type error: expected: GreenTask, found: SchedTask");
-            }
-        }
+    /// Deschedules the current task, invoking `f` `amt` times. It is not
+    /// recommended to use this function directly, but rather communication
+    /// primitives in `std::comm` should be used.
+    pub fn deschedule(mut ~self, amt: uint,
+                      f: |BlockedTask| -> Result<(), BlockedTask>) {
+        let ops = self.imp.take_unwrap();
+        ops.deschedule(amt, self, f)
+    }
+
+    /// Wakes up a previously blocked task, optionally specifying whether the
+    /// current task can accept a change in scheduling. This function can only
+    /// be called on tasks that were previously blocked in `deschedule`.
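
spawn_sibling and deschedule above (and reawaken just below) all share one shape: take the boxed runtime out of the task, then hand the task back to it so the runtime can consume both. A minimal model of that delegation, with today's Box&lt;dyn Trait&gt; in place of ~Runtime and a do-nothing runtime for demonstration (all names are illustrative):

    trait Runtime {
        fn yield_now(self: Box<Self>, cur_task: Box<Task>);
    }

    struct Task {
        imp: Option<Box<dyn Runtime>>, // plays the role of Task::imp
    }

    impl Task {
        fn yield_now(mut self: Box<Self>) {
            // Same two-step dance as above: remove the runtime, then let it
            // consume the task (and decide how, or whether, to resume it).
            let ops = self.imp.take().expect("runtime already taken");
            ops.yield_now(self);
        }
    }

    struct NopRuntime;

    impl Runtime for NopRuntime {
        fn yield_now(self: Box<Self>, mut cur_task: Box<Task>) {
            cur_task.imp = Some(self); // restore the runtime and "resume" at once
            println!("yield was a no-op");
        }
    }

    fn main() {
        let task = Box::new(Task { imp: Some(Box::new(NopRuntime)) });
        task.yield_now();
    }
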
+ pub fn reawaken(mut ~self, can_resched: bool) { + let ops = self.imp.take_unwrap(); + ops.reawaken(self, can_resched); + } + + /// Yields control of this task to another task. This function will + /// eventually return, but possibly not immediately. This is used as an + /// opportunity to allow other tasks a chance to run. + pub fn yield_now(mut ~self) { + let ops = self.imp.take_unwrap(); + ops.yield_now(self); + } + + /// Similar to `yield_now`, except that this function may immediately return + /// without yielding (depending on what the runtime decides to do). + pub fn maybe_yield(mut ~self) { + let ops = self.imp.take_unwrap(); + ops.maybe_yield(self); + } + + /// Acquires a handle to the I/O factory that this task contains, normally + /// stored in the task's runtime. This factory may not always be available, + /// which is why the return type is `Option` + pub fn local_io<'a>(&'a mut self) -> Option<LocalIo<'a>> { + self.imp.get_mut_ref().local_io() } } @@ -346,253 +274,101 @@ impl Drop for Task { fn drop(&mut self) { rtdebug!("called drop for a task: {}", borrow::to_uint(self)); rtassert!(self.destroyed); - - unsafe { self.nasty_deschedule_lock.destroy(); } } } -// Coroutines represent nothing more than a context and a stack -// segment. - -impl Coroutine { - - pub fn new(stack_pool: &mut StackPool, - stack_size: Option<uint>, - start: proc()) - -> Coroutine { - let stack_size = match stack_size { - Some(size) => size, - None => env::min_stack() - }; - let start = Coroutine::build_start_wrapper(start); - let mut stack = stack_pool.take_segment(stack_size); - let initial_context = Context::new(start, &mut stack); - Coroutine { - current_stack_segment: stack, - saved_context: initial_context - } +impl Iterator<BlockedTask> for BlockedTaskIterator { + fn next(&mut self) -> Option<BlockedTask> { + Some(Shared(self.inner.clone())) } +} - pub fn empty() -> Coroutine { - Coroutine { - current_stack_segment: StackSegment::new(0), - saved_context: Context::empty() +impl BlockedTask { + /// Returns Some if the task was successfully woken; None if already killed. + pub fn wake(self) -> Option<~Task> { + match self { + Owned(task) => Some(task), + Shared(arc) => unsafe { + match (*arc.get()).swap(0, SeqCst) { + 0 => None, + n => Some(cast::transmute(n)), + } + } } } - fn build_start_wrapper(start: proc()) -> proc() { - let wrapper: proc() = proc() { - // First code after swap to this new context. Run our - // cleanup job. - unsafe { + // This assertion has two flavours because the wake involves an atomic op. + // In the faster version, destructors will fail dramatically instead. + #[cfg(not(test))] pub fn trash(self) { } + #[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); } - // Again - might work while safe, or it might not. - { - let mut sched = Local::borrow(None::<Scheduler>); - sched.get().run_cleanup_job(); - } + /// Create a blocked task, unless the task was already killed. + pub fn block(task: ~Task) -> BlockedTask { + Owned(task) + } - // To call the run method on a task we need a direct - // reference to it. The task is in TLS, so we can - // simply unsafe_borrow it to get this reference. We - // need to still have the task in TLS though, so we - // need to unsafe_borrow. - let task: *mut Task = Local::unsafe_borrow(); - - let mut start_cell = Some(start); - (*task).run(|| { - // N.B. Removing `start` from the start wrapper - // closure by emptying a cell is critical for - // correctness. 
The ~Task pointer, and in turn the
-                // closure used to initialize the first call
-                // frame, is destroyed in the scheduler context,
-                // not task context. So any captured closures must
-                // not contain user-definable dtors that expect to
-                // be in task context. By moving `start` out of
-                // the closure, all the user code goes out of
-                // scope while the task is still running.
-                let start = start_cell.take_unwrap();
-                start();
-            });
+    /// Converts one blocked task handle to a list of many handles to the same.
+    pub fn make_selectable(self, num_handles: uint) -> Take<BlockedTaskIterator>
+    {
+        let arc = match self {
+            Owned(task) => {
+                let flag = unsafe { AtomicUint::new(cast::transmute(task)) };
+                UnsafeArc::new(flag)
             }
-
-            // We remove the sched from the Task in TLS right now.
-            let sched: ~Scheduler = Local::take();
-            // ... allowing us to give it away when performing a
-            // scheduling operation.
-            sched.terminate_current_task()
+            Shared(arc) => arc.clone(),
         };
-        return wrapper;
+        BlockedTaskIterator{ inner: arc }.take(num_handles)
     }

-    /// Destroy coroutine and try to reuse stack segment.
-    pub fn recycle(self, stack_pool: &mut StackPool) {
+    /// Convert to an unsafe uint value. Useful for storing in a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_to_uint(self) -> uint {
         match self {
-            Coroutine { current_stack_segment, .. } => {
-                stack_pool.give_segment(current_stack_segment);
+            Owned(task) => {
+                let blocked_task_ptr: uint = cast::transmute(task);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr
+            }
+            Shared(arc) => {
+                let blocked_task_ptr: uint = cast::transmute(~arc);
+                rtassert!(blocked_task_ptr & 0x1 == 0);
+                blocked_task_ptr | 0x1
             }
         }
     }
-}
-
-/// This function is invoked from rust's current __morestack function. Segmented
-/// stacks are currently not enabled; each task instead gets one giant stack
-/// segment. This means that whenever we run out of stack, we want to
-/// truly consider it to be stack overflow rather than allocating a new stack.
-#[no_mangle]      // - this is called from C code
-#[no_split_stack] // - it would be sad for this function to trigger __morestack
-#[doc(hidden)]    // - Function must be `pub` to get exported, but it's
-                  //   irrelevant for documentation purposes.
-#[cfg(not(test))] // in testing, use the original libstd's version
-pub extern "C" fn rust_stack_exhausted() {
-    use rt::context;
-    use rt::in_green_task_context;
-    use rt::task::Task;
-    use rt::local::Local;
-    use unstable::intrinsics;
-
-    unsafe {
-        // We're calling this function because the stack just ran out. We need
-        // to call some other rust functions, but if we invoke the functions
-        // right now it'll just trigger this handler being called again. In
-        // order to alleviate this, we move the stack limit to be inside of the
-        // red zone that was allocated for exactly this reason.
-        let limit = context::get_sp_limit();
-        context::record_sp_limit(limit - context::RED_ZONE / 2);
-
-        // This probably isn't the best course of action. Ideally one would want
-        // to unwind the stack here instead of just aborting the entire process.
-        // This is a tricky problem, however. There are a few things which need
-        // to be considered:
-        //
-        //  1. We're here because of a stack overflow, yet unwinding will run
-        //     destructors and hence arbitrary code. What if that code overflows
-        //     the stack? One possibility is to use the above allocation of an
-        //     extra 10k to hope that we don't hit the limit, and if we do then
-        //     abort the whole program. Not the best, but kind of hard to deal
-        //     with unless we want to switch stacks.
-        //
-        //  2. LLVM will optimize functions based on whether they can unwind or
-        //     not. It will flag functions with 'nounwind' if it believes that
-        //     the function cannot trigger unwinding, but if we do unwind on
-        //     stack overflow then it means that we could unwind in any function
-        //     anywhere. We would have to make sure that LLVM only places the
-        //     nounwind flag on functions which don't call any other functions.
-        //
-        //  3. The function that overflowed may have owned arguments. These
-        //     arguments need to have their destructors run, but we haven't even
-        //     begun executing the function yet, so unwinding will not run any
-        //     landing pads for these functions. If this is ignored, then
-        //     the arguments will just be leaked.
-        //
-        // Exactly what to do here is a very delicate topic, and is possibly
-        // still up in the air. Some relevant issues:
-        //
-        //  #3555 - out-of-stack failure leaks arguments
-        //  #3695 - should there be a stack limit?
-        //  #9855 - possible strategies which could be taken
-        //  #9854 - unwinding on windows through __morestack has never worked
-        //  #2361 - possible implementation of not using landing pads
-
-        if in_green_task_context() {
-            let mut task = Local::borrow(None::<Task>);
-            let n = task.get()
-                        .name
-                        .as_ref()
-                        .map(|n| n.as_slice())
-                        .unwrap_or("<unnamed>");
-
-            // See the message below for why this is not emitted to the
-            // task's logger. This has the additional conundrum that the
-            // logger may not be initialized just yet, meaning that an FFI
-            // call would happen to initialize it (calling out to libuv),
-            // and the FFI call needs 2MB of stack when we just ran out.
-            rterrln!("task '{}' has overflowed its stack", n);
+    /// Convert from an unsafe uint value. Useful for retrieving a pipe's state
+    /// flag.
+    #[inline]
+    pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask {
+        if blocked_task_ptr & 0x1 == 0 {
+            Owned(cast::transmute(blocked_task_ptr))
         } else {
-            rterrln!("stack overflow in non-task context");
+            let ptr: ~UnsafeArc<AtomicUint> =
+                cast::transmute(blocked_task_ptr & !1);
+            Shared(*ptr)
         }
-
-        intrinsics::abort();
     }
 }

-/// This is the entry point of unwinding for things like lang items and such.
-/// The arguments are normally generated by the compiler, and need to
-/// have static lifetimes.
-pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
-    use c_str::CString;
-    use cast::transmute;
+impl Death {
+    pub fn new() -> Death {
+        Death { on_exit: None, }
+    }

-    #[inline]
-    fn static_char_ptr(p: *c_char) -> &'static str {
-        let s = unsafe { CString::new(p, false) };
-        match s.as_str() {
-            Some(s) => unsafe { transmute::<&str, &'static str>(s) },
-            None => rtabort!("message wasn't utf8?")
+    /// Collect failure exit codes from children and propagate them to a parent.
+    pub fn collect_failure(&mut self, result: TaskResult) {
+        match self.on_exit.take() {
+            Some(f) => f(result),
+            None => {}
         }
     }
-
-    let msg = static_char_ptr(msg);
-    let file = static_char_ptr(file);
-
-    begin_unwind(msg, file, line as uint)
 }
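
The low-bit tagging in cast_to_uint/cast_from_uint above relies on word alignment: a ~-allocated pointer never has bit 0 set, so that bit is free to record which variant the word encodes. A self-contained model in today's Rust, with Box&lt;u64&gt; standing in for both ~Task and the shared arc (names are illustrative):

    enum Handle {
        Owned(Box<u64>),  // plays the role of Owned(~Task)
        Shared(Box<u64>), // plays the role of Shared(UnsafeArc<AtomicUint>)
    }

    fn to_word(h: Handle) -> usize {
        match h {
            Handle::Owned(b) => {
                let p = Box::into_raw(b) as usize;
                assert!(p & 0x1 == 0); // alignment guarantees a clear low bit
                p
            }
            Handle::Shared(b) => Box::into_raw(b) as usize | 0x1,
        }
    }

    unsafe fn from_word(word: usize) -> Handle {
        if word & 0x1 == 0 {
            Handle::Owned(Box::from_raw(word as *mut u64))
        } else {
            // Mask the tag back off before reconstituting the pointer.
            Handle::Shared(Box::from_raw((word & !0x1) as *mut u64))
        }
    }

    fn main() {
        let word = to_word(Handle::Owned(Box::new(7)));
        match unsafe { from_word(word) } {
            Handle::Owned(v) => assert_eq!(*v, 7),
            Handle::Shared(_) => unreachable!(),
        }
    }
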
-/// This is the entry point of unwinding for fail!() and assert!().
-pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> !
-{
-    use any::AnyRefExt;
-    use rt::in_green_task_context;
-    use rt::local::Local;
-    use rt::task::Task;
-    use str::Str;
-    use unstable::intrinsics;
-
-    unsafe {
-        let task: *mut Task;
-        // Note that this should be the only allocation performed in this block.
-        // Currently this means that fail!() on OOM will invoke this code path,
-        // but then again we're not really ready for failing on OOM anyway. If
-        // we do start doing this, then we should propagate this allocation to
-        // be performed in the parent of this task instead of the task that's
-        // failing.
-        let msg = ~msg as ~Any;
-
-        {
-            //let msg: &Any = msg;
-            let msg_s = match msg.as_ref::<&'static str>() {
-                Some(s) => *s,
-                None => match msg.as_ref::<~str>() {
-                    Some(s) => s.as_slice(),
-                    None => "~Any",
-                }
-            };
-
-            if !in_green_task_context() {
-                rterrln!("failed in non-task context at '{}', {}:{}",
-                         msg_s, file, line);
-                intrinsics::abort();
-            }
-
-            task = Local::unsafe_borrow();
-            let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or("<unnamed>");
-
-            // XXX: this should not get forcibly printed to the console, this should
-            //      either be sent to the parent task (ideally), or get printed to
-            //      the task's logger. Right now the logger is actually a uvio
-            //      instance, which uses unkillable blocks internally for various
-            //      reasons. This will cause serious trouble if the task is failing
-            //      due to mismanagement of its own kill flag, so calling our own
-            //      logger in its current state is a bit of a problem.
-
-            rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line);
-
-            if (*task).unwinder.unwinding {
-                rtabort!("unwinding again");
-            }
-        }
-
-        (*task).unwinder.begin_unwind(msg);
+impl Drop for Death {
+    fn drop(&mut self) {
+        // make this type noncopyable
     }
 }

@@ -690,4 +466,13 @@ mod test {
     #[test] #[should_fail]
     fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) }
+
+    // Task blocking tests
+
+    #[test]
+    fn block_and_wake() {
+        do with_test_task |task| {
+            BlockedTask::block(task).wake().unwrap()
+        }
+    }
 }
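`begin_unwind` boxes the failure message as `~Any` and then downcasts it to decide how to print it. The identical payload dance is how `std::panic::catch_unwind` consumers recover a message today; a runnable sketch under that assumption (the helper name `payload_to_string` is invented for illustration):

    use std::any::Any;
    use std::panic;

    // Mirror of the msg_s match in the diff: a panic payload is a boxed Any
    // that is usually a &'static str or a String.
    fn payload_to_string(payload: &(dyn Any + Send)) -> &str {
        if let Some(s) = payload.downcast_ref::<&'static str>() {
            s
        } else if let Some(s) = payload.downcast_ref::<String>() {
            s.as_str()
        } else {
            "~Any" // same fallback label the old runtime used
        }
    }

    fn main() {
        let result = panic::catch_unwind(|| panic!("task '{}' failed", "demo"));
        if let Err(payload) = result {
            println!("caught: {}", payload_to_string(&*payload));
        }
    }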
diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs
index c72ec3161cd..0542c444a84 100644
--- a/src/libstd/rt/thread.rs
+++ b/src/libstd/rt/thread.rs
@@ -69,6 +69,12 @@ impl Thread<()> {
     /// called, when the `Thread` falls out of scope its destructor will block
     /// waiting for the OS thread.
     pub fn start<T: Send>(main: proc() -> T) -> Thread<T> {
+        Thread::start_stack(DEFAULT_STACK_SIZE, main)
+    }
+
+    /// Performs the same functionality as `start`, but specifies an explicit
+    /// stack size for the new thread.
+    pub fn start_stack<T: Send>(stack: uint, main: proc() -> T) -> Thread<T> {
         // We need the address of the packet to fill in to be stable so when
         // `main` fills it in it's still valid, so allocate an extra ~ box to do
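`Thread::start_stack` foreshadows an API that later stabilized in std as `thread::Builder::stack_size`. A minimal example of the modern equivalent (the 4 MiB value is arbitrary, chosen only for illustration):

    use std::thread;

    fn main() {
        // Equivalent in spirit to Thread::start_stack(stack, main): request an
        // explicit stack size instead of the platform default.
        let handle = thread::Builder::new()
            .stack_size(4 * 1024 * 1024) // 4 MiB
            .spawn(|| {
                println!("running on a thread with a custom stack size");
            })
            .expect("failed to spawn thread");

        handle.join().unwrap();
    }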
diff --git a/src/libstd/rt/tube.rs b/src/libstd/rt/tube.rs
deleted file mode 100644
index 5e867bcdfba..00000000000
--- a/src/libstd/rt/tube.rs
+++ /dev/null
@@ -1,170 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! A very simple unsynchronized channel type for sending buffered data from
-//! scheduler context to task context.
-//!
-//! XXX: This would be safer to use if split into two types like Port/Chan
-
-use option::*;
-use clone::Clone;
-use super::rc::RC;
-use rt::sched::Scheduler;
-use rt::kill::BlockedTask;
-use rt::local::Local;
-use vec::OwnedVector;
-use container::Container;
-
-struct TubeState<T> {
-    blocked_task: Option<BlockedTask>,
-    buf: ~[T]
-}
-
-pub struct Tube<T> {
-    priv p: RC<TubeState<T>>
-}
-
-impl<T> Tube<T> {
-    pub fn new() -> Tube<T> {
-        Tube {
-            p: RC::new(TubeState {
-                blocked_task: None,
-                buf: ~[]
-            })
-        }
-    }
-
-    pub fn send(&mut self, val: T) {
-        rtdebug!("tube send");
-        unsafe {
-            let state = self.p.unsafe_borrow_mut();
-            (*state).buf.push(val);
-
-            if (*state).blocked_task.is_some() {
-                // There's a waiting task. Wake it up
-                rtdebug!("waking blocked tube");
-                let task = (*state).blocked_task.take_unwrap();
-                let sched: ~Scheduler = Local::take();
-                sched.resume_blocked_task_immediately(task);
-            }
-        }
-    }
-
-    pub fn recv(&mut self) -> T {
-        unsafe {
-            let state = self.p.unsafe_borrow_mut();
-            if !(*state).buf.is_empty() {
-                return (*state).buf.shift();
-            } else {
-                // Block and wait for the next message
-                rtdebug!("blocking on tube recv");
-                assert!(self.p.refcount() > 1); // There better be somebody to wake us up
-                assert!((*state).blocked_task.is_none());
-                let sched: ~Scheduler = Local::take();
-                sched.deschedule_running_task_and_then(|_, task| {
-                    (*state).blocked_task = Some(task);
-                });
-                rtdebug!("waking after tube recv");
-                let buf = &mut (*state).buf;
-                assert!(!buf.is_empty());
-                return buf.shift();
-            }
-        }
-    }
-}
-
-impl<T> Clone for Tube<T> {
-    fn clone(&self) -> Tube<T> {
-        Tube { p: self.p.clone() }
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use rt::test::*;
-    use rt::rtio::EventLoop;
-    use rt::sched::Scheduler;
-    use rt::local::Local;
-    use super::*;
-    use prelude::*;
-
-    #[test]
-    fn simple_test() {
-        do run_in_newsched_task {
-            let mut tube: Tube<int> = Tube::new();
-            let mut tube_clone = Some(tube.clone());
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                let mut tube_clone = tube_clone.take_unwrap();
-                tube_clone.send(1);
-                sched.enqueue_blocked_task(task);
-            });
-
-            assert!(tube.recv() == 1);
-        }
-    }
-
-    #[test]
-    fn blocking_test() {
-        do run_in_newsched_task {
-            let mut tube: Tube<int> = Tube::new();
-            let mut tube_clone = Some(tube.clone());
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                let tube_clone = tube_clone.take_unwrap();
-                do sched.event_loop.callback {
-                    let mut tube_clone = tube_clone;
-                    // The task should be blocked on this now and
-                    // sending will wake it up.
-                    tube_clone.send(1);
-                }
-                sched.enqueue_blocked_task(task);
-            });
-
-            assert!(tube.recv() == 1);
-        }
-    }
-
-    #[test]
-    fn many_blocking_test() {
-        static MAX: int = 100;
-
-        do run_in_newsched_task {
-            let mut tube: Tube<int> = Tube::new();
-            let mut tube_clone = Some(tube.clone());
-            let sched: ~Scheduler = Local::take();
-            sched.deschedule_running_task_and_then(|sched, task| {
-                callback_send(tube_clone.take_unwrap(), 0);
-
-                fn callback_send(tube: Tube<int>, i: int) {
-                    if i == 100 {
-                        return
-                    }
-
-                    let mut sched = Local::borrow(None::<Scheduler>);
-                    do sched.get().event_loop.callback {
-                        let mut tube = tube;
-                        // The task should be blocked on this now and
-                        // sending will wake it up.
-                        tube.send(i);
-                        callback_send(tube, i + 1);
-                    }
-                }
-
-                sched.enqueue_blocked_task(task);
-            });
-
-            for i in range(0, MAX) {
-                let j = tube.recv();
-                assert!(j == i);
-            }
-        }
-    }
-}
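`Tube`'s job — buffer values and wake the blocked receiver on `send` — is what `std::sync::mpsc` does today, with exactly the Port/Chan split the XXX comment wished for. A rough modern analogue of `simple_test` (approximate only: mpsc channels are synchronized, unlike `Tube`):

    use std::sync::mpsc;
    use std::thread;

    fn main() {
        // Sender/receiver pair: the split the Tube XXX comment asked for.
        let (tx, rx) = mpsc::channel::<i32>();

        let sender = thread::spawn(move || {
            // The receiver may already be blocked in recv(); this send wakes it.
            tx.send(1).unwrap();
        });

        assert_eq!(rx.recv().unwrap(), 1);
        sender.join().unwrap();
    }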
diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs
index 3f6f54a9c0e..8248c6274ca 100644
--- a/src/libstd/rt/unwind.rs
+++ b/src/libstd/rt/unwind.rs
@@ -8,7 +8,6 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-
 // Implementation of Rust stack unwinding
 //
 // For background on exception handling and stack unwinding please see "Exception Handling in LLVM"
@@ -254,3 +253,74 @@ pub extern "C" fn rust_eh_personality_catch(version: c_int,
         }
     }
 }
+
+/// This is the entry point of unwinding for things like lang items and such.
+/// The arguments are normally generated by the compiler, and need to
+/// have static lifetimes.
+pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! {
+    #[inline]
+    fn static_char_ptr(p: *c_char) -> &'static str {
+        let s = unsafe { CString::new(p, false) };
+        match s.as_str() {
+            Some(s) => unsafe { cast::transmute::<&str, &'static str>(s) },
+            None => rtabort!("message wasn't utf8?")
+        }
+    }
+
+    let msg = static_char_ptr(msg);
+    let file = static_char_ptr(file);
+
+    begin_unwind(msg, file, line as uint)
+}
+
+/// This is the entry point of unwinding for fail!() and assert!().
+pub fn begin_unwind<M: Any + Send>(msg: M, file: &'static str, line: uint) -> ! {
+    unsafe {
+        let task: *mut Task;
+        // Note that this should be the only allocation performed in this block.
+        // Currently this means that fail!() on OOM will invoke this code path,
+        // but then again we're not really ready for failing on OOM anyway. If
+        // we do start doing this, then we should propagate this allocation to
+        // be performed in the parent of this task instead of the task that's
+        // failing.
+        let msg = ~msg as ~Any;
+
+        {
+            let msg_s = match msg.as_ref::<&'static str>() {
+                Some(s) => *s,
+                None => match msg.as_ref::<~str>() {
+                    Some(s) => s.as_slice(),
+                    None => "~Any",
+                }
+            };
+
+            // It is assumed that all reasonable rust code will have a local
+            // task at all times. This means that this `try_unsafe_borrow` will
+            // succeed almost all of the time. There are border cases, however,
+            // when the runtime has *almost* set up the local task, but hasn't
+            // quite gotten there yet. In order to get some better diagnostics,
+            // we print on failure and immediately abort the whole process if
+            // there is no local task available.
+            match Local::try_unsafe_borrow() {
+                Some(t) => {
+                    task = t;
+                    let n = (*task).name.as_ref()
+                                        .map(|n| n.as_slice()).unwrap_or("<unnamed>");
+
+                    println!("task '{}' failed at '{}', {}:{}", n, msg_s,
+                             file, line);
+                }
+                None => {
+                    println!("failed at '{}', {}:{}", msg_s, file, line);
+                    intrinsics::abort();
+                }
+            }
+
+            if (*task).unwinder.unwinding {
+                rtabort!("unwinding again");
+            }
+        }
+
+        (*task).unwinder.begin_unwind(msg);
+    }
+}
diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs
index 2f3e5be39e6..69c1da39abc 100644
--- a/src/libstd/rt/util.rs
+++ b/src/libstd/rt/util.rs
@@ -15,7 +15,6 @@ use libc;
 use option::{Some, None, Option},
 use os;
 use str::StrSlice;
-use unstable::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst};
 use unstable::running_on_valgrind;

 // Indicates whether we should perform expensive sanity checks, including rtassert!
@@ -144,13 +143,3 @@ memory and partly incapable of presentation to others.",
         unsafe { libc::abort() }
     }
 }
-
-static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT;
-
-pub fn set_exit_status(code: int) {
-    unsafe { EXIT_STATUS.store(code, SeqCst) }
-}
-
-pub fn get_exit_status() -> int {
-    unsafe { EXIT_STATUS.load(SeqCst) }
-}
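The removed `set_exit_status`/`get_exit_status` pair was simply a process-global atomic cell; the functionality survives elsewhere after the libgreen split. For reference, the same pattern in current Rust (a sketch, not the relocated libstd code):

    use std::sync::atomic::{AtomicI32, Ordering};

    // Process-wide exit status cell, as EXIT_STATUS was in rt::util.
    static EXIT_STATUS: AtomicI32 = AtomicI32::new(0);

    fn set_exit_status(code: i32) {
        EXIT_STATUS.store(code, Ordering::SeqCst);
    }

    fn get_exit_status() -> i32 {
        EXIT_STATUS.load(Ordering::SeqCst)
    }

    fn main() {
        set_exit_status(1);
        // A runtime would hand this value to the OS on shutdown;
        // std::process::exit(get_exit_status()) is the modern escape hatch.
        assert_eq!(get_exit_status(), 1);
    }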
