From cab44fb076b4bc56a5877304a7b7617325cb8573 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 17:14:18 -0800 Subject: std: Introduce a Runtime trait This trait is used to abstract the differences between 1:1 and M:N scheduling and is the sole dispatch point for the differences between these two scheduling modes. This, and the following series of commits, is not intended to compile. Only after the entire transition is complete are programs expected to compile. --- src/libstd/rt/mod.rs | 294 +++++---------------------------------------------- 1 file changed, 25 insertions(+), 269 deletions(-) (limited to 'src/libstd/rt') diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index df1ebeb6407..be35e7579b7 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -185,41 +185,33 @@ pub mod args; // Support for dynamic borrowck pub mod borrowck; -/// Set up a default runtime configuration, given compiler-supplied arguments. -/// -/// This is invoked by the `start` _language item_ (unstable::lang) to -/// run a Rust executable. -/// -/// # Arguments -/// -/// * `argc` & `argv` - The argument vector. On Unix this information is used -/// by os::args. -/// -/// # Return value -/// -/// The return value is used as the process return code. 0 on success, 101 on error. -pub fn start(argc: int, argv: **u8, main: proc()) -> int { - - init(argc, argv); - let exit_code = run(main); - // unsafe is ok b/c we're sure that the runtime is gone - unsafe { cleanup(); } +/// The default error code of the rust runtime if the main task fails instead +/// of exiting cleanly. +pub static DEFAULT_ERROR_CODE: int = 101; - return exit_code; -} - -/// Like `start` but creates an additional scheduler on the current thread, -/// which in most cases will be the 'main' thread, and pins the main task to it. +/// The interface to the current runtime. 
/// -/// This is appropriate for running code that must execute on the main thread, -/// such as the platform event loop and GUI. -pub fn start_on_main_thread(argc: int, argv: **u8, main: proc()) -> int { - init(argc, argv); - let exit_code = run_on_main_thread(main); - // unsafe is ok b/c we're sure that the runtime is gone - unsafe { cleanup(); } - - return exit_code; +/// This trait is used as the abstraction between 1:1 and M:N scheduling. The +/// two independent crates, libnative and libgreen, both have objects which +/// implement this trait. The goal of this trait is to encompass all the +/// fundamental differences in functionality between the 1:1 and M:N runtime +/// modes. +pub trait Runtime { + // Necessary scheduling functions, used for channels and blocking I/O + // (sometimes). + fn yield_now(~self, cur_task: ~Task); + fn maybe_yield(~self, cur_task: ~Task); + fn deschedule(~self, times: uint, cur_task: ~Task, + f: |BlockedTask| -> Result<(), BlockedTask>); + fn reawaken(~self, to_wake: ~Task, can_resched: bool); + + // Miscellaneous calls which are very different depending on what context + // you're in. + fn spawn_sibling(~self, cur_task: ~Task, opts: TaskOpts, f: proc()); + fn local_io<'a>(&'a mut self) -> Option>; + + // XXX: This is a serious code smell and this should not exist at all. + fn wrap(~self) -> ~Any; } /// One-time runtime initialization. @@ -250,239 +242,3 @@ pub unsafe fn cleanup() { args::cleanup(); local_ptr::cleanup(); } - -/// Execute the main function in a scheduler. -/// -/// Configures the runtime according to the environment, by default -/// using a task scheduler with the same number of threads as cores. -/// Returns a process exit code. 
-pub fn run(main: proc()) -> int { - run_(main, false) -} - -pub fn run_on_main_thread(main: proc()) -> int { - run_(main, true) -} - -fn run_(main: proc(), use_main_sched: bool) -> int { - static DEFAULT_ERROR_CODE: int = 101; - - let nscheds = util::default_sched_threads(); - - let mut main = Some(main); - - // The shared list of sleeping schedulers. - let sleepers = SleeperList::new(); - - // Create a work queue for each scheduler, ntimes. Create an extra - // for the main thread if that flag is set. We won't steal from it. - let mut pool = deque::BufferPool::new(); - let arr = vec::from_fn(nscheds, |_| pool.deque()); - let (workers, stealers) = vec::unzip(arr.move_iter()); - - // The schedulers. - let mut scheds = ~[]; - // Handles to the schedulers. When the main task ends these will be - // sent the Shutdown message to terminate the schedulers. - let mut handles = ~[]; - - for worker in workers.move_iter() { - rtdebug!("inserting a regular scheduler"); - - // Every scheduler is driven by an I/O event loop. - let loop_ = new_event_loop(); - let mut sched = ~Scheduler::new(loop_, - worker, - stealers.clone(), - sleepers.clone()); - let handle = sched.make_handle(); - - scheds.push(sched); - handles.push(handle); - } - - // If we need a main-thread task then create a main thread scheduler - // that will reject any task that isn't pinned to it - let main_sched = if use_main_sched { - - // Create a friend handle. - let mut friend_sched = scheds.pop(); - let friend_handle = friend_sched.make_handle(); - scheds.push(friend_sched); - - // This scheduler needs a queue that isn't part of the stealee - // set. - let (worker, _) = pool.deque(); - - let main_loop = new_event_loop(); - let mut main_sched = ~Scheduler::new_special(main_loop, - worker, - stealers.clone(), - sleepers.clone(), - false, - Some(friend_handle)); - let mut main_handle = main_sched.make_handle(); - // Allow the scheduler to exit when the main task exits. 
- // Note: sending the shutdown message also prevents the scheduler - // from pushing itself to the sleeper list, which is used for - // waking up schedulers for work stealing; since this is a - // non-work-stealing scheduler it should not be adding itself - // to the list. - main_handle.send(Shutdown); - Some(main_sched) - } else { - None - }; - - // Create a shared cell for transmitting the process exit - // code from the main task to this function. - let exit_code = UnsafeArc::new(AtomicInt::new(0)); - let exit_code_clone = exit_code.clone(); - - // Used to sanity check that the runtime only exits once - let exited_already = UnsafeArc::new(AtomicBool::new(false)); - - // When the main task exits, after all the tasks in the main - // task tree, shut down the schedulers and set the exit code. - let handles = handles; - let on_exit: proc(TaskResult) = proc(exit_success) { - unsafe { - assert!(!(*exited_already.get()).swap(true, SeqCst), - "the runtime already exited"); - } - - let mut handles = handles; - for handle in handles.mut_iter() { - handle.send(Shutdown); - } - - unsafe { - let exit_code = if exit_success.is_ok() { - use rt::util; - - // If we're exiting successfully, then return the global - // exit status, which can be set programmatically. - util::get_exit_status() - } else { - DEFAULT_ERROR_CODE - }; - (*exit_code_clone.get()).store(exit_code, SeqCst); - } - }; - - let mut threads = ~[]; - let mut on_exit = Some(on_exit); - - if !use_main_sched { - - // In the case where we do not use a main_thread scheduler we - // run the main task in one of our threads. - - let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, - None, - ::util::replace(&mut main, - None).unwrap()); - main_task.name = Some(SendStrStatic("
")); - main_task.death.on_exit = ::util::replace(&mut on_exit, None); - - let sched = scheds.pop(); - let main_task = main_task; - let thread = do Thread::start { - sched.bootstrap(main_task); - }; - threads.push(thread); - } - - // Run each remaining scheduler in a thread. - for sched in scheds.move_rev_iter() { - rtdebug!("creating regular schedulers"); - let thread = do Thread::start { - let mut sched = sched; - let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || { - rtdebug!("boostraping a non-primary scheduler"); - }; - sched.bootstrap(bootstrap_task); - }; - threads.push(thread); - } - - // If we do have a main thread scheduler, run it now. - - if use_main_sched { - rtdebug!("about to create the main scheduler task"); - - let mut main_sched = main_sched.unwrap(); - - let home = Sched(main_sched.make_handle()); - let mut main_task = ~Task::new_root_homed(&mut main_sched.stack_pool, - None, - home, - ::util::replace(&mut main, - None). - unwrap()); - main_task.name = Some(SendStrStatic("
")); - main_task.death.on_exit = ::util::replace(&mut on_exit, None); - rtdebug!("bootstrapping main_task"); - - main_sched.bootstrap(main_task); - } - - rtdebug!("waiting for threads"); - - // Wait for schedulers - for thread in threads.move_iter() { - thread.join(); - } - - // Return the exit code - unsafe { - (*exit_code.get()).load(SeqCst) - } -} - -pub fn in_sched_context() -> bool { - unsafe { - let task_ptr: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_ptr { - Some(task) => { - match (*task).task_type { - SchedTask => true, - _ => false - } - } - None => false - } - } -} - -pub fn in_green_task_context() -> bool { - unsafe { - let task: Option<*mut Task> = Local::try_unsafe_borrow(); - match task { - Some(task) => { - match (*task).task_type { - GreenTask(_) => true, - _ => false - } - } - None => false - } - } -} - -pub fn new_event_loop() -> ~rtio::EventLoop { - match crate_map::get_crate_map() { - None => {} - Some(map) => { - match map.event_loop_factory { - None => {} - Some(factory) => return factory() - } - } - } - - // If the crate map didn't specify a factory to create an event loop, then - // instead just use a basic event loop missing all I/O services to at least - // get the scheduler running. 
- return basic::event_loop(); -} -- cgit 1.4.1-3-g733a5 From 1815aea36818cd86ebae607522318f56e35c01a2 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 17:20:03 -0800 Subject: std: Introduce an unstable::stack module This module will be used to manage the OS-specific TLS registers used to specify the bounds of the current rust stack (useful in 1:1 and M:N) --- src/libstd/rt/thread.rs | 4 +- src/libstd/unstable/stack.rs | 274 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 276 insertions(+), 2 deletions(-) create mode 100644 src/libstd/unstable/stack.rs (limited to 'src/libstd/rt') diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs index 6128f310a2e..c72ec3161cd 100644 --- a/src/libstd/rt/thread.rs +++ b/src/libstd/rt/thread.rs @@ -41,9 +41,9 @@ static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024; // and invoke it. #[no_split_stack] extern fn thread_start(main: *libc::c_void) -> imp::rust_thread_return { - use rt::context; + use unstable::stack; unsafe { - context::record_stack_bounds(0, uint::max_value); + stack::record_stack_bounds(0, uint::max_value); let f: ~proc() = cast::transmute(main); (*f)(); cast::transmute(0 as imp::rust_thread_return) diff --git a/src/libstd/unstable/stack.rs b/src/libstd/unstable/stack.rs new file mode 100644 index 00000000000..46a3a80be25 --- /dev/null +++ b/src/libstd/unstable/stack.rs @@ -0,0 +1,274 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Rust stack-limit management +//! +//! Currently Rust uses a segmented-stack-like scheme in order to detect stack +//! overflow for rust tasks. In this scheme, the prologue of all functions are +//! 
preceded with a check to see whether the current stack limits are being +//! exceeded. +//! +//! This module provides the functionality necessary in order to manage these +//! stack limits (which are stored in platform-specific locations). The +//! functions here are used at the borders of the task lifetime in order to +//! manage these limits. +//! +//! This function is an unstable module because this scheme for stack overflow +//! detection is not guaranteed to continue in the future. Usage of this module +//! is discouraged unless absolutely necessary. + +use rt::task::Task; +use option::None; +use rt::local::Local; +use unstable::intrinsics; + +static RED_ZONE: uint = 20 * 1024; + +/// This function is invoked from rust's current __morestack function. Segmented +/// stacks are currently not enabled as segmented stacks, but rather one giant +/// stack segment. This means that whenever we run out of stack, we want to +/// truly consider it to be stack overflow rather than allocating a new stack. +#[no_mangle] // - this is called from C code +#[no_split_stack] // - it would be sad for this function to trigger __morestack +#[doc(hidden)] // - Function must be `pub` to get exported, but it's + // irrelevant for documentation purposes. +#[cfg(not(test))] // in testing, use the original libstd's version +pub extern "C" fn rust_stack_exhausted() { + + unsafe { + // We're calling this function because the stack just ran out. We need + // to call some other rust functions, but if we invoke the functions + // right now it'll just trigger this handler being called again. In + // order to alleviate this, we move the stack limit to be inside of the + // red zone that was allocated for exactly this reason. + let limit = get_sp_limit(); + record_sp_limit(limit - RED_ZONE / 2); + + // This probably isn't the best course of action. Ideally one would want + // to unwind the stack here instead of just aborting the entire process. + // This is a tricky problem, however. 
There's a few things which need to + // be considered: + // + // 1. We're here because of a stack overflow, yet unwinding will run + // destructors and hence arbitrary code. What if that code overflows + // the stack? One possibility is to use the above allocation of an + // extra 10k to hope that we don't hit the limit, and if we do then + // abort the whole program. Not the best, but kind of hard to deal + // with unless we want to switch stacks. + // + // 2. LLVM will optimize functions based on whether they can unwind or + // not. It will flag functions with 'nounwind' if it believes that + // the function cannot trigger unwinding, but if we do unwind on + // stack overflow then it means that we could unwind in any function + // anywhere. We would have to make sure that LLVM only places the + // nounwind flag on functions which don't call any other functions. + // + // 3. The function that overflowed may have owned arguments. These + // arguments need to have their destructors run, but we haven't even + // begun executing the function yet, so unwinding will not run the + // any landing pads for these functions. If this is ignored, then + // the arguments will just be leaked. + // + // Exactly what to do here is a very delicate topic, and is possibly + // still up in the air for what exactly to do. Some relevant issues: + // + // #3555 - out-of-stack failure leaks arguments + // #3695 - should there be a stack limit? + // #9855 - possible strategies which could be taken + // #9854 - unwinding on windows through __morestack has never worked + // #2361 - possible implementation of not using landing pads + + let mut task = Local::borrow(None::); + let n = task.get().name.as_ref() + .map(|n| n.as_slice()).unwrap_or(""); + + // See the message below for why this is not emitted to the + // task's logger. 
This has the additional conundrum of the + // logger may not be initialized just yet, meaning that an FFI + // call would happen to initialized it (calling out to libuv), + // and the FFI call needs 2MB of stack when we just ran out. + println!("task '{}' has overflowed its stack", n); + + intrinsics::abort(); + } +} + +#[inline(always)] +pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) { + // When the old runtime had segmented stacks, it used a calculation that was + // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic + // symbol resolution, llvm function calls, etc. In theory this red zone + // value is 0, but it matters far less when we have gigantic stacks because + // we don't need to be so exact about our stack budget. The "fudge factor" + // was because LLVM doesn't emit a stack check for functions < 256 bytes in + // size. Again though, we have giant stacks, so we round all these + // calculations up to the nice round number of 20k. + record_sp_limit(stack_lo + RED_ZONE); + + return target_record_stack_bounds(stack_lo, stack_hi); + + #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)] + unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {} + #[cfg(windows, target_arch = "x86_64")] #[inline(always)] + unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) { + // Windows compiles C functions which may check the stack bounds. This + // means that if we want to perform valid FFI on windows, then we need + // to ensure that the stack bounds are what they truly are for this + // task. More info can be found at: + // https://github.com/mozilla/rust/issues/3445#issuecomment-26114839 + // + // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom) + asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile"); + asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile"); + } +} + +/// Records the current limit of the stack as specified by `end`. 
+/// +/// This is stored in an OS-dependent location, likely inside of the thread +/// local storage. The location that the limit is stored is a pre-ordained +/// location because it's where LLVM has emitted code to check. +/// +/// Note that this cannot be called under normal circumstances. This function is +/// changing the stack limit, so upon returning any further function calls will +/// possibly be triggering the morestack logic if you're not careful. +/// +/// Also note that this and all of the inside functions are all flagged as +/// "inline(always)" because they're messing around with the stack limits. This +/// would be unfortunate for the functions themselves to trigger a morestack +/// invocation (if they were an actual function call). +#[inline(always)] +pub unsafe fn record_sp_limit(limit: uint) { + return target_record_sp_limit(limit); + + // x86-64 + #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movq $$0x60+90*8, %rsi + movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile") + } + #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile") + } + #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block + // store this inside of the "arbitrary data slot", but double the size + // because this is 64 bit instead of 32 bit + asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile") + } + #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile") + } + + // x86 + #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movl $$0x48+90*4, %eax + movl $0, 
%gs:(%eax)" :: "r"(limit) : "eax" : "volatile") + } + #[cfg(target_arch = "x86", target_os = "linux")] + #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile") + } + #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block + // store this inside of the "arbitrary data slot" + asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile") + } + + // mips, arm - Some brave soul can port these to inline asm, but it's over + // my head personally + #[cfg(target_arch = "mips")] + #[cfg(target_arch = "arm")] #[inline(always)] + unsafe fn target_record_sp_limit(limit: uint) { + return record_sp_limit(limit as *c_void); + extern { + fn record_sp_limit(limit: *c_void); + } + } +} + +/// The counterpart of the function above, this function will fetch the current +/// stack limit stored in TLS. +/// +/// Note that all of these functions are meant to be exact counterparts of their +/// brethren above, except that the operands are reversed. +/// +/// As with the setter, this function does not have a __morestack header and can +/// therefore be called in a "we're out of stack" situation. 
+#[inline(always)] +pub unsafe fn get_sp_limit() -> uint { + return target_get_sp_limit(); + + // x86-64 + #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq $$0x60+90*8, %rsi + movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile"); + return limit; + } + #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + + // x86 + #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movl $$0x48+90*4, %eax + movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile"); + return limit; + } + #[cfg(target_arch = "x86", target_os = "linux")] + #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + let limit; + asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile"); + return limit; + } + + // mips, arm - Some brave soul can port these to inline asm, but it's over + // my head personally + #[cfg(target_arch = "mips")] + #[cfg(target_arch = "arm")] #[inline(always)] + unsafe fn target_get_sp_limit() -> uint { + return get_sp_limit() as uint; + extern { + fn get_sp_limit() -> *c_void; + } + } +} -- cgit 
1.4.1-3-g733a5 From dafb310ba131b34a8819566201dc8d0af9bbd406 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 17:20:58 -0800 Subject: std: Delete rt::test This module contains many M:N specific concepts. This will no longer be available with libgreen, and most functions aren't really that necessary today anyway. New testing primitives will be introduced as they become available for 1:1 and M:N. A new io::test module is introduced with the new ip4/ip6 address helpers to continue usage in io tests. --- src/libstd/io/net/tcp.rs | 659 ++++++++++++++++++++++------------------------ src/libstd/io/net/udp.rs | 211 +++++++-------- src/libstd/io/net/unix.rs | 98 +++---- src/libstd/io/option.rs | 63 ++--- src/libstd/io/test.rs | 79 ++++++ src/libstd/io/timer.rs | 63 ++--- src/libstd/rt/test.rs | 440 ------------------------------- 7 files changed, 583 insertions(+), 1030 deletions(-) create mode 100644 src/libstd/io/test.rs delete mode 100644 src/libstd/rt/test.rs (limited to 'src/libstd/rt') diff --git a/src/libstd/io/net/tcp.rs b/src/libstd/io/net/tcp.rs index a6230ede7e3..db51653d665 100644 --- a/src/libstd/io/net/tcp.rs +++ b/src/libstd/io/net/tcp.rs @@ -147,468 +147,439 @@ impl Acceptor for TcpAcceptor { #[cfg(test)] mod test { use super::*; - use rt::test::*; use io::net::ip::{Ipv4Addr, SocketAddr}; use io::*; + use io::test::{next_test_ip4, next_test_ip6}; use prelude::*; #[test] #[ignore] fn bind_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert!(e.kind == PermissionDenied); - called = true; - }).inside(|| { - let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; - let listener = TcpListener::bind(addr); - assert!(listener.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert!(e.kind == PermissionDenied); + called = true; + }).inside(|| { + let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; + let listener = 
TcpListener::bind(addr); + assert!(listener.is_none()); + }); + assert!(called); } #[test] fn connect_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - let expected_error = if cfg!(unix) { - ConnectionRefused - } else { - // On Win32, opening port 1 gives WSAEADDRNOTAVAIL error. - OtherIoError - }; - assert_eq!(e.kind, expected_error); - called = true; - }).inside(|| { - let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; - let stream = TcpStream::connect(addr); - assert!(stream.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + let expected_error = if cfg!(unix) { + ConnectionRefused + } else { + // On Win32, opening port 1 gives WSAEADDRNOTAVAIL error. + OtherIoError + }; + assert_eq!(e.kind, expected_error); + called = true; + }).inside(|| { + let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; + let stream = TcpStream::connect(addr); + assert!(stream.is_none()); + }); + assert!(called); } #[test] fn smoke_test_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - } + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + do spawn { port.recv(); let mut stream = TcpStream::connect(addr); stream.write([99]); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); } #[test] fn smoke_test_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - } + 
let addr = next_test_ip6(); + let (port, chan) = oneshot(); + do spawn { port.recv(); let mut stream = TcpStream::connect(addr); stream.write([99]); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); } #[test] fn read_eof_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - assert!(nread.is_none()); - } + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); } #[test] fn read_eof_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - assert!(nread.is_none()); - } + let addr = next_test_ip6(); + let (port, chan) = oneshot(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); } #[test] fn read_eof_twice_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - 
assert!(nread.is_none()); - io_error::cond.trap(|e| { - if cfg!(windows) { - assert_eq!(e.kind, NotConnected); - } else { - fail!(); - } - }).inside(|| { - let nread = stream.read(buf); - assert!(nread.is_none()); - }) - } + let addr = next_test_ip4(); + let (port, chan) = oneshot(); - port.recv(); + do spawn { + port.take().recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); + io_error::cond.trap(|e| { + if cfg!(windows) { + assert_eq!(e.kind, NotConnected); + } else { + fail!(); + } + }).inside(|| { + let nread = stream.read(buf); + assert!(nread.is_none()); + }) } #[test] fn read_eof_twice_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let mut buf = [0]; - let nread = stream.read(buf); - assert!(nread.is_none()); - io_error::cond.trap(|e| { - if cfg!(windows) { - assert_eq!(e.kind, NotConnected); - } else { - fail!(); - } - }).inside(|| { - let nread = stream.read(buf); - assert!(nread.is_none()); - }) - } + let addr = next_test_ip6(); + let (port, chan) = oneshot(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let mut buf = [0]; + let nread = stream.read(buf); + assert!(nread.is_none()); + io_error::cond.trap(|e| { + if cfg!(windows) { + assert_eq!(e.kind, NotConnected); + } else { + fail!(); + } + }).inside(|| { + let nread = stream.read(buf); + assert!(nread.is_none()); + }) } #[test] fn write_close_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut 
acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let buf = [0]; - loop { - let mut stop = false; - io_error::cond.trap(|e| { - // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED - // on windows - assert!(e.kind == ConnectionReset || - e.kind == BrokenPipe || - e.kind == ConnectionAborted, - "unknown error: {:?}", e); - stop = true; - }).inside(|| { - stream.write(buf); - }); - if stop { break } - } - } + let addr = next_test_ip4(); + let (port, chan) = oneshot(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let buf = [0]; + loop { + let mut stop = false; + io_error::cond.trap(|e| { + // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED + // on windows + assert!(e.kind == ConnectionReset || + e.kind == BrokenPipe || + e.kind == ConnectionAborted, + "unknown error: {:?}", e); + stop = true; + }).inside(|| { + stream.write(buf); + }); + if stop { break } + } } #[test] fn write_close_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - let mut stream = acceptor.accept(); - let buf = [0]; - loop { - let mut stop = false; - io_error::cond.trap(|e| { - // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED - // on windows - assert!(e.kind == ConnectionReset || - e.kind == BrokenPipe || - e.kind == ConnectionAborted, - "unknown error: {:?}", e); - stop = true; - }).inside(|| { - stream.write(buf); - }); - if stop { break } - } - } + let addr = next_test_ip6(); + let (port, chan) = oneshot(); + do spawn { port.recv(); let _stream = TcpStream::connect(addr); // Close } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + let mut stream = acceptor.accept(); + let buf = [0]; + loop { + let mut stop = false; + 
io_error::cond.trap(|e| { + // NB: ECONNRESET on linux, EPIPE on mac, ECONNABORTED + // on windows + assert!(e.kind == ConnectionReset || + e.kind == BrokenPipe || + e.kind == ConnectionAborted, + "unknown error: {:?}", e); + stop = true; + }).inside(|| { + stream.write(buf); + }); + if stop { break } + } } #[test] fn multiple_connect_serial_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - let max = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for ref mut stream in acceptor.incoming().take(max) { - let mut buf = [0]; - stream.read(buf); - assert_eq!(buf[0], 99); - } - } + let addr = next_test_ip4(); + let max = 10; + let (port, chan) = oneshot(); + do spawn { port.recv(); max.times(|| { let mut stream = TcpStream::connect(addr); stream.write([99]); }); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for ref mut stream in acceptor.incoming().take(max) { + let mut buf = [0]; + stream.read(buf); + assert_eq!(buf[0], 99); + } } #[test] fn multiple_connect_serial_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - let max = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for ref mut stream in acceptor.incoming().take(max) { - let mut buf = [0]; - stream.read(buf); - assert_eq!(buf[0], 99); - } - } + let addr = next_test_ip6(); + let max = 10; + let (port, chan) = oneshot(); + do spawn { port.recv(); max.times(|| { let mut stream = TcpStream::connect(addr); stream.write([99]); }); } + + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for ref mut stream in acceptor.incoming().take(max) { + let mut buf = [0]; + stream.read(buf); + assert_eq!(buf[0], 99); + } } #[test] fn multiple_connect_interleaved_greedy_schedule_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - static MAX: int = 10; - let 
(port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { - // Start another task to handle the connection - do spawntask { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == i as u8); - debug!("read"); - } + let addr = next_test_ip4(); + static MAX: int = 10; + let (port, chan) = oneshot(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { + let stream = Cell::new(stream); + // Start another task to handle the connection + do spawn { + let mut stream = stream.take(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == i as u8); + debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask { - debug!("connecting"); - let mut stream = TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([i as u8]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([i as u8]); } } } #[test] fn multiple_connect_interleaved_greedy_schedule_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - static MAX: int = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { - // Start another task to handle the connection - do spawntask { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == i as u8); - debug!("read"); - } + let addr = 
next_test_ip6(); + static MAX: int = 10; + let (port, chan) = oneshot(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { + let stream = Cell::new(stream); + // Start another task to handle the connection + do spawn { + let mut stream = stream.take(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == i as u8); + debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask { - debug!("connecting"); - let mut stream = TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([i as u8]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([i as u8]); } } } #[test] fn multiple_connect_interleaved_lazy_schedule_ip4() { - do run_in_mt_newsched_task { - let addr = next_test_ip4(); - static MAX: int = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for stream in acceptor.incoming().take(MAX as uint) { - // Start another task to handle the connection - do spawntask_later { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - debug!("read"); - } + let addr = next_test_ip4(); + static MAX: int = 10; + let (port, chan) = oneshot(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for stream in acceptor.incoming().take(MAX as uint) { + let stream = Cell::new(stream); + // Start another task to handle the connection + do spawn { + let mut stream = stream.take(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); + 
debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask_later { - debug!("connecting"); - let mut stream = TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([99]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([99]); } } } #[test] fn multiple_connect_interleaved_lazy_schedule_ip6() { - do run_in_mt_newsched_task { - let addr = next_test_ip6(); - static MAX: int = 10; - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - for stream in acceptor.incoming().take(MAX as uint) { - // Start another task to handle the connection - do spawntask_later { - let mut stream = stream; - let mut buf = [0]; - stream.read(buf); - assert!(buf[0] == 99); - debug!("read"); - } + let addr = next_test_ip6(); + static MAX: int = 10; + let (port, chan) = oneshot(); + + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + for stream in acceptor.incoming().take(MAX as uint) { + let stream = Cell::new(stream); + // Start another task to handle the connection + do spawn { + let mut stream = stream.take(); + let mut buf = [0]; + stream.read(buf); + assert!(buf[0] == 99); + debug!("read"); } } + } - port.recv(); - connect(0, addr); - - fn connect(i: int, addr: SocketAddr) { - if i == MAX { return } - - do spawntask_later { - debug!("connecting"); - let mut stream = TcpStream::connect(addr); - // Connect again before writing - connect(i + 1, addr); - debug!("writing"); - stream.write([99]); - } + port.recv(); + connect(0, addr); + + fn connect(i: int, addr: SocketAddr) { + if i == MAX { return } + + do 
spawn { + debug!("connecting"); + let mut stream = TcpStream::connect(addr); + // Connect again before writing + connect(i + 1, addr); + debug!("writing"); + stream.write([99]); } } } @@ -631,29 +602,26 @@ mod test { #[cfg(test)] fn peer_name(addr: SocketAddr) { - do run_in_mt_newsched_task { - let (port, chan) = Chan::new(); + let (port, chan) = oneshot(); - do spawntask { - let mut acceptor = TcpListener::bind(addr).listen(); - chan.send(()); - - acceptor.accept(); - } + do spawn { + let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); + acceptor.accept(); + } - port.recv(); - let stream = TcpStream::connect(addr); + port.recv(); + let stream = TcpStream::connect(addr); - assert!(stream.is_some()); - let mut stream = stream.unwrap(); + assert!(stream.is_some()); + let mut stream = stream.unwrap(); - // Make sure peer_name gives us the - // address/port of the peer we've - // connected to. - let peer_name = stream.peer_name(); - assert!(peer_name.is_some()); - assert_eq!(addr, peer_name.unwrap()); - } + // Make sure peer_name gives us the + // address/port of the peer we've + // connected to. 
+ let peer_name = stream.peer_name(); + assert!(peer_name.is_some()); + assert_eq!(addr, peer_name.unwrap()); } #[test] @@ -668,5 +636,4 @@ mod test { //peer_name(next_test_ip6()); socket_name(next_test_ip6()); } - } diff --git a/src/libstd/io/net/udp.rs b/src/libstd/io/net/udp.rs index 1e56f964bea..0a277ee4347 100644 --- a/src/libstd/io/net/udp.rs +++ b/src/libstd/io/net/udp.rs @@ -104,52 +104,31 @@ impl Writer for UdpStream { #[cfg(test)] mod test { use super::*; - use rt::test::*; use io::net::ip::{Ipv4Addr, SocketAddr}; use io::*; use prelude::*; #[test] #[ignore] fn bind_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert!(e.kind == PermissionDenied); - called = true; - }).inside(|| { - let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; - let socket = UdpSocket::bind(addr); - assert!(socket.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert!(e.kind == PermissionDenied); + called = true; + }).inside(|| { + let addr = SocketAddr { ip: Ipv4Addr(0, 0, 0, 0), port: 1 }; + let socket = UdpSocket::bind(addr); + assert!(socket.is_none()); + }); + assert!(called); } #[test] fn socket_smoke_test_ip4() { - do run_in_mt_newsched_task { - let server_ip = next_test_ip4(); - let client_ip = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(ref mut server) => { - chan.send(()); - let mut buf = [0]; - match server.recvfrom(buf) { - Some((nread, src)) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - assert_eq!(src, client_ip); - } - None => fail!() - } - } - None => fail!() - } - } + let server_ip = next_test_ip4(); + let client_ip = next_test_ip4(); + let (port, chan) = oneshot(); + do spawn { match UdpSocket::bind(client_ip) { Some(ref mut client) => { port.recv(); @@ -158,33 +137,31 @@ mod test { None => fail!() } } - } - #[test] - fn socket_smoke_test_ip6() { - do run_in_mt_newsched_task { 
- let server_ip = next_test_ip6(); - let client_ip = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(ref mut server) => { - chan.send(()); - let mut buf = [0]; - match server.recvfrom(buf) { - Some((nread, src)) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - assert_eq!(src, client_ip); - } - None => fail!() - } + match UdpSocket::bind(server_ip) { + Some(ref mut server) => { + chan.send(()); + let mut buf = [0]; + match server.recvfrom(buf) { + Some((nread, src)) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + assert_eq!(src, client_ip); } None => fail!() } } + None => fail!() + } + } + #[test] + fn socket_smoke_test_ip6() { + let server_ip = next_test_ip6(); + let client_ip = next_test_ip6(); + let (port, chan) = oneshot(); + + do spawn { match UdpSocket::bind(client_ip) { Some(ref mut client) => { port.recv(); @@ -193,34 +170,31 @@ mod test { None => fail!() } } - } - #[test] - fn stream_smoke_test_ip4() { - do run_in_mt_newsched_task { - let server_ip = next_test_ip4(); - let client_ip = next_test_ip4(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(server) => { - let server = ~server; - let mut stream = server.connect(client_ip); - chan.send(()); - let mut buf = [0]; - match stream.read(buf) { - Some(nread) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - } - None => fail!() - } + match UdpSocket::bind(server_ip) { + Some(ref mut server) => { + chan.take().send(()); + let mut buf = [0]; + match server.recvfrom(buf) { + Some((nread, src)) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + assert_eq!(src, client_ip); } None => fail!() } } + None => fail!() + } + } + + #[test] + fn stream_smoke_test_ip4() { + let server_ip = next_test_ip4(); + let client_ip = next_test_ip4(); + let (port, chan) = oneshot(); + do spawn { match UdpSocket::bind(client_ip) { Some(client) => { let client = ~client; @@ -231,34 +205,32 @@ 
mod test { None => fail!() } } - } - #[test] - fn stream_smoke_test_ip6() { - do run_in_mt_newsched_task { - let server_ip = next_test_ip6(); - let client_ip = next_test_ip6(); - let (port, chan) = Chan::new(); - - do spawntask { - match UdpSocket::bind(server_ip) { - Some(server) => { - let server = ~server; - let mut stream = server.connect(client_ip); - chan.send(()); - let mut buf = [0]; - match stream.read(buf) { - Some(nread) => { - assert_eq!(nread, 1); - assert_eq!(buf[0], 99); - } - None => fail!() - } + match UdpSocket::bind(server_ip) { + Some(server) => { + let server = ~server; + let mut stream = server.connect(client_ip); + chan.send(()); + let mut buf = [0]; + match stream.read(buf) { + Some(nread) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); } None => fail!() } } + None => fail!() + } + } + + #[test] + fn stream_smoke_test_ip6() { + let server_ip = next_test_ip6(); + let client_ip = next_test_ip6(); + let (port, chan) = oneshot(); + do spawn { match UdpSocket::bind(client_ip) { Some(client) => { let client = ~client; @@ -269,25 +241,36 @@ mod test { None => fail!() } } + + match UdpSocket::bind(server_ip) { + Some(server) => { + let server = ~server; + let mut stream = server.connect(client_ip); + chan.send(()); + let mut buf = [0]; + match stream.read(buf) { + Some(nread) => { + assert_eq!(nread, 1); + assert_eq!(buf[0], 99); + } + None => fail!() + } + } + None => fail!() + } } - #[cfg(test)] fn socket_name(addr: SocketAddr) { - do run_in_mt_newsched_task { - do spawntask { - let server = UdpSocket::bind(addr); - - assert!(server.is_some()); - let mut server = server.unwrap(); + let server = UdpSocket::bind(addr); - // Make sure socket_name gives - // us the socket we binded to. - let so_name = server.socket_name(); - assert!(so_name.is_some()); - assert_eq!(addr, so_name.unwrap()); + assert!(server.is_some()); + let mut server = server.unwrap(); - } - } + // Make sure socket_name gives + // us the socket we binded to. 
+ let so_name = server.socket_name(); + assert!(so_name.is_some()); + assert_eq!(addr, so_name.unwrap()); } #[test] diff --git a/src/libstd/io/net/unix.rs b/src/libstd/io/net/unix.rs index 2766aa9ad27..d8abd1fe50d 100644 --- a/src/libstd/io/net/unix.rs +++ b/src/libstd/io/net/unix.rs @@ -150,55 +150,47 @@ impl Acceptor for UnixAcceptor { mod tests { use prelude::*; use super::*; - use rt::test::*; use io::*; fn smalltest(server: proc(UnixStream), client: proc(UnixStream)) { - do run_in_mt_newsched_task { - let path1 = next_test_unix(); - let path2 = path1.clone(); - let (client, server) = (client, server); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = UnixListener::bind(&path1).listen(); - chan.send(()); - server(acceptor.accept().unwrap()); - } + let path1 = next_test_unix(); + let path2 = path1.clone(); + let (port, chan) = oneshot(); + do spawn { port.recv(); client(UnixStream::connect(&path2).unwrap()); } + + let mut acceptor = UnixListener::bind(&path1).listen(); + chan.send(()); + server(acceptor.accept().unwrap()); } #[test] fn bind_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert!(e.kind == PermissionDenied); - called = true; - }).inside(|| { - let listener = UnixListener::bind(&("path/to/nowhere")); - assert!(listener.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert!(e.kind == PermissionDenied); + called = true; + }).inside(|| { + let listener = UnixListener::bind(&("path/to/nowhere")); + assert!(listener.is_none()); + }); + assert!(called); } #[test] fn connect_error() { - do run_in_mt_newsched_task { - let mut called = false; - io_error::cond.trap(|e| { - assert_eq!(e.kind, FileNotFound); - called = true; - }).inside(|| { - let stream = UnixStream::connect(&("path/to/nowhere")); - assert!(stream.is_none()); - }); - assert!(called); - } + let mut called = false; + io_error::cond.trap(|e| { + assert_eq!(e.kind, 
OtherIoError); + called = true; + }).inside(|| { + let stream = UnixStream::connect(&("path/to/nowhere")); + assert!(stream.is_none()); + }); + assert!(called); } #[test] @@ -244,37 +236,33 @@ mod tests { #[test] fn accept_lots() { - do run_in_mt_newsched_task { - let times = 10; - let path1 = next_test_unix(); - let path2 = path1.clone(); - let (port, chan) = Chan::new(); - - do spawntask { - let mut acceptor = UnixListener::bind(&path1).listen(); - chan.send(()); - times.times(|| { - let mut client = acceptor.accept(); - let mut buf = [0]; - client.read(buf); - assert_eq!(buf[0], 100); - }) - } + let times = 10; + let path1 = next_test_unix(); + let path2 = path1.clone(); + let (port, chan) = oneshot(); + do spawn { port.recv(); times.times(|| { let mut stream = UnixStream::connect(&path2); stream.write([100]); }) } + + let mut acceptor = UnixListener::bind(&path1).listen(); + chan.send(()); + times.times(|| { + let mut client = acceptor.accept(); + let mut buf = [0]; + client.read(buf); + assert_eq!(buf[0], 100); + }) } #[test] fn path_exists() { - do run_in_mt_newsched_task { - let path = next_test_unix(); - let _acceptor = UnixListener::bind(&path).listen(); - assert!(path.exists()); - } + let path = next_test_unix(); + let _acceptor = UnixListener::bind(&path).listen(); + assert!(path.exists()); } } diff --git a/src/libstd/io/option.rs b/src/libstd/io/option.rs index 61c5411f360..a661d6ab7eb 100644 --- a/src/libstd/io/option.rs +++ b/src/libstd/io/option.rs @@ -106,53 +106,46 @@ impl> Acceptor for Option { mod test { use option::*; use super::super::mem::*; - use rt::test::*; use super::super::{PreviousIoError, io_error}; #[test] fn test_option_writer() { - do run_in_mt_newsched_task { - let mut writer: Option = Some(MemWriter::new()); - writer.write([0, 1, 2]); - writer.flush(); - assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]); - } + let mut writer: Option = Some(MemWriter::new()); + writer.write([0, 1, 2]); + writer.flush(); + 
assert_eq!(writer.unwrap().inner(), ~[0, 1, 2]); } #[test] fn test_option_writer_error() { - do run_in_mt_newsched_task { - let mut writer: Option = None; - - let mut called = false; - io_error::cond.trap(|err| { - assert_eq!(err.kind, PreviousIoError); - called = true; - }).inside(|| { - writer.write([0, 0, 0]); - }); - assert!(called); - - let mut called = false; - io_error::cond.trap(|err| { - assert_eq!(err.kind, PreviousIoError); - called = true; - }).inside(|| { - writer.flush(); - }); - assert!(called); - } + let mut writer: Option = None; + + let mut called = false; + io_error::cond.trap(|err| { + assert_eq!(err.kind, PreviousIoError); + called = true; + }).inside(|| { + writer.write([0, 0, 0]); + }); + assert!(called); + + let mut called = false; + io_error::cond.trap(|err| { + assert_eq!(err.kind, PreviousIoError); + called = true; + }).inside(|| { + writer.flush(); + }); + assert!(called); } #[test] fn test_option_reader() { - do run_in_mt_newsched_task { - let mut reader: Option = Some(MemReader::new(~[0, 1, 2, 3])); - let mut buf = [0, 0]; - reader.read(buf); - assert_eq!(buf, [0, 1]); - assert!(!reader.eof()); - } + let mut reader: Option = Some(MemReader::new(~[0, 1, 2, 3])); + let mut buf = [0, 0]; + reader.read(buf); + assert_eq!(buf, [0, 1]); + assert!(!reader.eof()); } #[test] diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs new file mode 100644 index 00000000000..212e4ebffa8 --- /dev/null +++ b/src/libstd/io/test.rs @@ -0,0 +1,79 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +/// Get a port number, starting at 9600, for use in tests +pub fn next_test_port() -> u16 { + use unstable::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed}; + static mut next_offset: AtomicUint = INIT_ATOMIC_UINT; + unsafe { + base_port() + next_offset.fetch_add(1, Relaxed) as u16 + } +} + +/// Get a temporary path which could be the location of a unix socket +pub fn next_test_unix() -> Path { + if cfg!(unix) { + os::tmpdir().join(rand::task_rng().gen_ascii_str(20)) + } else { + Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20)) + } +} + +/// Get a unique IPv4 localhost:port pair starting at 9600 +pub fn next_test_ip4() -> SocketAddr { + SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() } +} + +/// Get a unique IPv6 localhost:port pair starting at 9600 +pub fn next_test_ip6() -> SocketAddr { + SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() } +} + +/* +XXX: Welcome to MegaHack City. + +The bots run multiple builds at the same time, and these builds +all want to use ports. This function figures out which workspace +it is running in and assigns a port range based on it. 
+*/ +fn base_port() -> u16 { + use os; + use str::StrSlice; + use vec::ImmutableVector; + + let base = 9600u16; + let range = 1000u16; + + let bases = [ + ("32-opt", base + range * 1), + ("32-noopt", base + range * 2), + ("64-opt", base + range * 3), + ("64-noopt", base + range * 4), + ("64-opt-vg", base + range * 5), + ("all-opt", base + range * 6), + ("snap3", base + range * 7), + ("dist", base + range * 8) + ]; + + // FIXME (#9639): This needs to handle non-utf8 paths + let path = os::getcwd(); + let path_s = path.as_str().unwrap(); + + let mut final_base = base; + + for &(dir, base) in bases.iter() { + if path_s.contains(dir) { + final_base = base; + break; + } + } + + return final_base; +} diff --git a/src/libstd/io/timer.rs b/src/libstd/io/timer.rs index 9d4a72509e7..c86e1a1890b 100644 --- a/src/libstd/io/timer.rs +++ b/src/libstd/io/timer.rs @@ -108,77 +108,60 @@ impl Timer { mod test { use prelude::*; use super::*; - use rt::test::*; #[test] fn test_io_timer_sleep_simple() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.sleep(1); - } + let mut timer = Timer::new().unwrap(); + timer.sleep(1); } #[test] fn test_io_timer_sleep_oneshot() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.oneshot(1).recv(); - } + let mut timer = Timer::new().unwrap(); + timer.oneshot(1).recv(); } #[test] fn test_io_timer_sleep_oneshot_forget() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.oneshot(100000000000); - } + let mut timer = Timer::new().unwrap(); + timer.oneshot(100000000000); } #[test] fn oneshot_twice() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - let port1 = timer.oneshot(10000); - let port = timer.oneshot(1); - port.recv(); - assert_eq!(port1.try_recv(), None); - } + let mut timer = Timer::new().unwrap(); + let port1 = timer.oneshot(10000); + let port = timer.oneshot(1); + port.recv(); + assert_eq!(port1.try_recv(), None); } #[test] fn 
test_io_timer_oneshot_then_sleep() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - let port = timer.oneshot(100000000000); - timer.sleep(1); // this should invalidate the port + let mut timer = Timer::new().unwrap(); + let port = timer.oneshot(100000000000); + timer.sleep(1); // this should invalidate the port - assert_eq!(port.try_recv(), None); - } + assert_eq!(port.try_recv(), None); } #[test] fn test_io_timer_sleep_periodic() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - let port = timer.periodic(1); - port.recv(); - port.recv(); - port.recv(); - } + let mut timer = Timer::new().unwrap(); + let port = timer.periodic(1); + port.recv(); + port.recv(); + port.recv(); } #[test] fn test_io_timer_sleep_periodic_forget() { - do run_in_mt_newsched_task { - let mut timer = Timer::new().unwrap(); - timer.periodic(100000000000); - } + let mut timer = Timer::new().unwrap(); + timer.periodic(100000000000); } #[test] fn test_io_timer_sleep_standalone() { - do run_in_mt_newsched_task { - sleep(1) - } + sleep(1) } } diff --git a/src/libstd/rt/test.rs b/src/libstd/rt/test.rs deleted file mode 100644 index 2b48b396c99..00000000000 --- a/src/libstd/rt/test.rs +++ /dev/null @@ -1,440 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use io::net::ip::{SocketAddr, Ipv4Addr, Ipv6Addr}; - -use clone::Clone; -use container::Container; -use iter::{Iterator, range}; -use option::{Some, None}; -use os; -use path::GenericPath; -use path::Path; -use rand::Rng; -use rand; -use result::{Result, Ok, Err}; -use rt::basic; -use rt::deque::BufferPool; -use comm::Chan; -use rt::new_event_loop; -use rt::sched::Scheduler; -use rt::sleeper_list::SleeperList; -use rt::task::Task; -use rt::thread::Thread; -use task::TaskResult; -use unstable::{run_in_bare_thread}; -use vec; -use vec::{OwnedVector, MutableVector, ImmutableVector}; - -pub fn new_test_uv_sched() -> Scheduler { - - let mut pool = BufferPool::new(); - let (worker, stealer) = pool.deque(); - - let mut sched = Scheduler::new(new_event_loop(), - worker, - ~[stealer], - SleeperList::new()); - - // Don't wait for the Shutdown message - sched.no_sleep = true; - return sched; - -} - -pub fn new_test_sched() -> Scheduler { - let mut pool = BufferPool::new(); - let (worker, stealer) = pool.deque(); - - let mut sched = Scheduler::new(basic::event_loop(), - worker, - ~[stealer], - SleeperList::new()); - - // Don't wait for the Shutdown message - sched.no_sleep = true; - return sched; -} - -pub fn run_in_uv_task(f: proc()) { - do run_in_bare_thread { - run_in_uv_task_core(f); - } -} - -pub fn run_in_newsched_task(f: proc()) { - do run_in_bare_thread { - run_in_newsched_task_core(f); - } -} - -pub fn run_in_uv_task_core(f: proc()) { - - use rt::sched::Shutdown; - - let mut sched = ~new_test_uv_sched(); - let exit_handle = sched.make_handle(); - - let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) { - let mut exit_handle = exit_handle; - exit_handle.send(Shutdown); - rtassert!(exit_status.is_ok()); - }; - let mut task = ~Task::new_root(&mut sched.stack_pool, None, f); - task.death.on_exit = Some(on_exit); - - sched.bootstrap(task); -} - -pub fn run_in_newsched_task_core(f: proc()) { - use rt::sched::Shutdown; - - let mut sched = ~new_test_sched(); - let 
exit_handle = sched.make_handle(); - - let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) { - let mut exit_handle = exit_handle; - exit_handle.send(Shutdown); - rtassert!(exit_status.is_ok()); - }; - let mut task = ~Task::new_root(&mut sched.stack_pool, None, f); - task.death.on_exit = Some(on_exit); - - sched.bootstrap(task); -} - -#[cfg(target_os="macos")] -#[allow(non_camel_case_types)] -mod darwin_fd_limit { - /*! - * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the - * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low - * for our multithreaded scheduler testing, depending on the number of cores available. - * - * This fixes issue #7772. - */ - - use libc; - type rlim_t = libc::uint64_t; - struct rlimit { - rlim_cur: rlim_t, - rlim_max: rlim_t - } - #[nolink] - extern { - // name probably doesn't need to be mut, but the C function doesn't specify const - fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint, - oldp: *mut libc::c_void, oldlenp: *mut libc::size_t, - newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int; - fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int; - fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int; - } - static CTL_KERN: libc::c_int = 1; - static KERN_MAXFILESPERPROC: libc::c_int = 29; - static RLIMIT_NOFILE: libc::c_int = 8; - - pub unsafe fn raise_fd_limit() { - // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc - // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value. 
- use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null}; - use mem::size_of_val; - use os::last_os_error; - - // Fetch the kern.maxfilesperproc value - let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC]; - let mut maxfiles: libc::c_int = 0; - let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; - if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2, - to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void, - to_mut_unsafe_ptr(&mut size), - mut_null(), 0) != 0 { - let err = last_os_error(); - error!("raise_fd_limit: error calling sysctl: {}", err); - return; - } - - // Fetch the current resource limits - let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0}; - if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 { - let err = last_os_error(); - error!("raise_fd_limit: error calling getrlimit: {}", err); - return; - } - - // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit - rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max); - - // Set our newly-increased resource limit - if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 { - let err = last_os_error(); - error!("raise_fd_limit: error calling setrlimit: {}", err); - return; - } - } -} - -#[cfg(not(target_os="macos"))] -mod darwin_fd_limit { - pub unsafe fn raise_fd_limit() {} -} - -#[doc(hidden)] -pub fn prepare_for_lots_of_tests() { - // Bump the fd limit on OS X. See darwin_fd_limit for an explanation. - unsafe { darwin_fd_limit::raise_fd_limit() } -} - -/// Create more than one scheduler and run a function in a task -/// in one of the schedulers. The schedulers will stay alive -/// until the function `f` returns. 
-pub fn run_in_mt_newsched_task(f: proc()) { - use os; - use from_str::FromStr; - use rt::sched::Shutdown; - use rt::util; - - // see comment in other function (raising fd limits) - prepare_for_lots_of_tests(); - - do run_in_bare_thread { - let nthreads = match os::getenv("RUST_RT_TEST_THREADS") { - Some(nstr) => FromStr::from_str(nstr).unwrap(), - None => { - if util::limit_thread_creation_due_to_osx_and_valgrind() { - 1 - } else { - // Using more threads than cores in test code - // to force the OS to preempt them frequently. - // Assuming that this help stress test concurrent types. - util::num_cpus() * 2 - } - } - }; - - let sleepers = SleeperList::new(); - - let mut handles = ~[]; - let mut scheds = ~[]; - - let mut pool = BufferPool::<~Task>::new(); - let workers = range(0, nthreads).map(|_| pool.deque()); - let (workers, stealers) = vec::unzip(workers); - - for worker in workers.move_iter() { - let loop_ = new_event_loop(); - let mut sched = ~Scheduler::new(loop_, - worker, - stealers.clone(), - sleepers.clone()); - let handle = sched.make_handle(); - - handles.push(handle); - scheds.push(sched); - } - - let handles = handles; // Work around not being able to capture mut - let on_exit: proc(TaskResult) = proc(exit_status: TaskResult) { - // Tell schedulers to exit - let mut handles = handles; - for handle in handles.mut_iter() { - handle.send(Shutdown); - } - - rtassert!(exit_status.is_ok()); - }; - let mut main_task = ~Task::new_root(&mut scheds[0].stack_pool, - None, - f); - main_task.death.on_exit = Some(on_exit); - - let mut threads = ~[]; - - let main_thread = { - let sched = scheds.pop(); - let main_task = main_task; - do Thread::start { - sched.bootstrap(main_task); - } - }; - threads.push(main_thread); - - while !scheds.is_empty() { - let mut sched = scheds.pop(); - let bootstrap_task = ~do Task::new_root(&mut sched.stack_pool, None) || { - rtdebug!("bootstrapping non-primary scheduler"); - }; - let sched = sched; - let thread = do Thread::start { - 
sched.bootstrap(bootstrap_task); - }; - - threads.push(thread); - } - - // Wait for schedulers - for thread in threads.move_iter() { - thread.join(); - } - } - -} - -/// Test tasks will abort on failure instead of unwinding -pub fn spawntask(f: proc()) { - Scheduler::run_task(Task::build_child(None, f)); -} - -/// Create a new task and run it right now. Aborts on failure -pub fn spawntask_later(f: proc()) { - Scheduler::run_task_later(Task::build_child(None, f)); -} - -pub fn spawntask_random(f: proc()) { - use rand::{Rand, rng}; - - let mut rng = rng(); - let run_now: bool = Rand::rand(&mut rng); - - if run_now { - spawntask(f) - } else { - spawntask_later(f) - } -} - -pub fn spawntask_try(f: proc()) -> Result<(),()> { - - let (port, chan) = Chan::new(); - let on_exit: proc(TaskResult) = proc(exit_status) { - chan.send(exit_status) - }; - - let mut new_task = Task::build_root(None, f); - new_task.death.on_exit = Some(on_exit); - - Scheduler::run_task(new_task); - - let exit_status = port.recv(); - if exit_status.is_ok() { Ok(()) } else { Err(()) } - -} - -/// Spawn a new task in a new scheduler and return a thread handle. -pub fn spawntask_thread(f: proc()) -> Thread<()> { - let thread = do Thread::start { - run_in_newsched_task_core(f); - }; - - return thread; -} - -/// Get a ~Task for testing purposes other than actually scheduling it. -pub fn with_test_task(blk: proc(~Task) -> ~Task) { - do run_in_bare_thread { - let mut sched = ~new_test_sched(); - let task = blk(~Task::new_root(&mut sched.stack_pool, - None, - proc() {})); - cleanup_task(task); - } -} - -/// Use to cleanup tasks created for testing but not "run". 
-pub fn cleanup_task(mut task: ~Task) { - task.destroyed = true; -} - -/// Get a port number, starting at 9600, for use in tests -pub fn next_test_port() -> u16 { - use unstable::mutex::{Mutex, MUTEX_INIT}; - static mut lock: Mutex = MUTEX_INIT; - static mut next_offset: u16 = 0; - unsafe { - let base = base_port(); - lock.lock(); - let ret = base + next_offset; - next_offset += 1; - lock.unlock(); - return ret; - } -} - -/// Get a temporary path which could be the location of a unix socket -pub fn next_test_unix() -> Path { - if cfg!(unix) { - os::tmpdir().join(rand::task_rng().gen_ascii_str(20)) - } else { - Path::new(r"\\.\pipe\" + rand::task_rng().gen_ascii_str(20)) - } -} - -/// Get a unique IPv4 localhost:port pair starting at 9600 -pub fn next_test_ip4() -> SocketAddr { - SocketAddr { ip: Ipv4Addr(127, 0, 0, 1), port: next_test_port() } -} - -/// Get a unique IPv6 localhost:port pair starting at 9600 -pub fn next_test_ip6() -> SocketAddr { - SocketAddr { ip: Ipv6Addr(0, 0, 0, 0, 0, 0, 0, 1), port: next_test_port() } -} - -/* -XXX: Welcome to MegaHack City. - -The bots run multiple builds at the same time, and these builds -all want to use ports. This function figures out which workspace -it is running in and assigns a port range based on it. 
-*/ -fn base_port() -> u16 { - use os; - use str::StrSlice; - use vec::ImmutableVector; - - let base = 9600u16; - let range = 1000u16; - - let bases = [ - ("32-opt", base + range * 1), - ("32-noopt", base + range * 2), - ("64-opt", base + range * 3), - ("64-noopt", base + range * 4), - ("64-opt-vg", base + range * 5), - ("all-opt", base + range * 6), - ("snap3", base + range * 7), - ("dist", base + range * 8) - ]; - - // FIXME (#9639): This needs to handle non-utf8 paths - let path = os::getcwd(); - let path_s = path.as_str().unwrap(); - - let mut final_base = base; - - for &(dir, base) in bases.iter() { - if path_s.contains(dir) { - final_base = base; - break; - } - } - - return final_base; -} - -/// Get a constant that represents the number of times to repeat -/// stress tests. Default 1. -pub fn stress_factor() -> uint { - use os::getenv; - use from_str::from_str; - - match getenv("RUST_RT_STRESS") { - Some(val) => from_str::(val).unwrap(), - None => 1 - } -} -- cgit 1.4.1-3-g733a5 From a55c57284d8341ee5b22c5372e77ac0af9479dc5 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 17:27:37 -0800 Subject: std: Introduce std::sync For now, this moves the following modules to std::sync * UnsafeArc (also removed unwrap method) * mpsc_queue * spsc_queue * atomics * mpmc_bounded_queue * deque We may want to remove some of the queues, but for now this moves things out of std::rt into std::sync --- src/libextra/arc.rs | 65 +--- src/libextra/sync.rs | 5 +- src/librustc/middle/trans/debuginfo.rs | 2 +- src/libstd/lib.rs | 1 + src/libstd/rt/deque.rs | 658 -------------------------------- src/libstd/rt/mpmc_bounded_queue.rs | 209 ----------- src/libstd/rt/mpsc_queue.rs | 215 ----------- src/libstd/rt/spsc_queue.rs | 296 --------------- src/libstd/sync/arc.rs | 153 ++++++++ src/libstd/sync/atomics.rs | 603 ++++++++++++++++++++++++++++++ src/libstd/sync/deque.rs | 661 +++++++++++++++++++++++++++++++++ src/libstd/sync/mod.rs | 23 ++ 
src/libstd/sync/mpmc_bounded_queue.rs | 211 +++++++++++ src/libstd/sync/mpsc_queue.rs | 245 ++++++++++++ src/libstd/sync/spsc_queue.rs | 334 +++++++++++++++++ src/libstd/unstable/atomics.rs | 600 ------------------------------ src/libstd/unstable/dynamic_lib.rs | 9 +- src/libstd/unstable/mod.rs | 1 - src/libstd/unstable/mutex.rs | 2 +- src/libstd/unstable/sync.rs | 481 +----------------------- 20 files changed, 2242 insertions(+), 2532 deletions(-) delete mode 100644 src/libstd/rt/deque.rs delete mode 100644 src/libstd/rt/mpmc_bounded_queue.rs delete mode 100644 src/libstd/rt/mpsc_queue.rs delete mode 100644 src/libstd/rt/spsc_queue.rs create mode 100644 src/libstd/sync/arc.rs create mode 100644 src/libstd/sync/atomics.rs create mode 100644 src/libstd/sync/deque.rs create mode 100644 src/libstd/sync/mod.rs create mode 100644 src/libstd/sync/mpmc_bounded_queue.rs create mode 100644 src/libstd/sync/mpsc_queue.rs create mode 100644 src/libstd/sync/spsc_queue.rs delete mode 100644 src/libstd/unstable/atomics.rs (limited to 'src/libstd/rt') diff --git a/src/libextra/arc.rs b/src/libextra/arc.rs index c1763c37bb5..a411c4e9185 100644 --- a/src/libextra/arc.rs +++ b/src/libextra/arc.rs @@ -45,7 +45,7 @@ use sync; use sync::{Mutex, RWLock}; use std::cast; -use std::unstable::sync::UnsafeArc; +use std::sync::arc::UnsafeArc; use std::task; use std::borrow; @@ -127,20 +127,6 @@ impl Arc { pub fn get<'a>(&'a self) -> &'a T { unsafe { &*self.x.get_immut() } } - - /** - * Retrieve the data back out of the Arc. This function blocks until the - * reference given to it is the last existing one, and then unwrap the data - * instead of destroying it. - * - * If multiple tasks call unwrap, all but the first will fail. Do not call - * unwrap from a task that holds another reference to the same Arc; it is - * guaranteed to deadlock. 
- */ - pub fn unwrap(self) -> T { - let Arc { x: x } = self; - x.unwrap() - } } impl Clone for Arc { @@ -247,22 +233,6 @@ impl MutexArc { cond: cond }) }) } - - /** - * Retrieves the data, blocking until all other references are dropped, - * exactly as arc::unwrap. - * - * Will additionally fail if another task has failed while accessing the arc. - */ - pub fn unwrap(self) -> T { - let MutexArc { x: x } = self; - let inner = x.unwrap(); - let MutexArcInner { failed: failed, data: data, .. } = inner; - if failed { - fail!("Can't unwrap poisoned MutexArc - another task failed inside!"); - } - data - } } impl MutexArc { @@ -503,23 +473,6 @@ impl RWArc { } } } - - /** - * Retrieves the data, blocking until all other references are dropped, - * exactly as arc::unwrap. - * - * Will additionally fail if another task has failed while accessing the arc - * in write mode. - */ - pub fn unwrap(self) -> T { - let RWArc { x: x, .. } = self; - let inner = x.unwrap(); - let RWArcInner { failed: failed, data: data, .. 
} = inner; - if failed { - fail!("Can't unwrap poisoned RWArc - another task failed inside!") - } - data - } } // Borrowck rightly complains about immutably aliasing the rwlock in order to @@ -689,22 +642,6 @@ mod tests { }) } - #[test] #[should_fail] - pub fn test_mutex_arc_unwrap_poison() { - let arc = MutexArc::new(1); - let arc2 = ~(&arc).clone(); - let (p, c) = Chan::new(); - do task::spawn { - arc2.access(|one| { - c.send(()); - assert!(*one == 2); - }) - } - let _ = p.recv(); - let one = arc.unwrap(); - assert!(one == 1); - } - #[test] fn test_unsafe_mutex_arc_nested() { unsafe { diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs index 57a7f38696d..fb11eb6a3c4 100644 --- a/src/libextra/sync.rs +++ b/src/libextra/sync.rs @@ -19,8 +19,9 @@ use std::borrow; -use std::unstable::sync::{Exclusive, UnsafeArc}; -use std::unstable::atomics; +use std::unstable::sync::Exclusive; +use std::sync::arc::UnsafeArc; +use std::sync::atomics; use std::unstable::finally::Finally; use std::util; use std::util::NonCopyable; diff --git a/src/librustc/middle/trans/debuginfo.rs b/src/librustc/middle/trans/debuginfo.rs index a77e8f764f3..61fadb7e236 100644 --- a/src/librustc/middle/trans/debuginfo.rs +++ b/src/librustc/middle/trans/debuginfo.rs @@ -146,7 +146,7 @@ use std::hashmap::HashMap; use std::hashmap::HashSet; use std::libc::{c_uint, c_ulonglong, c_longlong}; use std::ptr; -use std::unstable::atomics; +use std::sync::atomics; use std::vec; use syntax::codemap::{Span, Pos}; use syntax::{ast, codemap, ast_util, ast_map, opt_vec}; diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index b2b856c5c83..200e4e63261 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -159,6 +159,7 @@ pub mod trie; pub mod task; pub mod comm; pub mod local_data; +pub mod sync; /* Runtime and platform support */ diff --git a/src/libstd/rt/deque.rs b/src/libstd/rt/deque.rs deleted file mode 100644 index 770fc9ffa12..00000000000 --- a/src/libstd/rt/deque.rs +++ /dev/null @@ -1,658 +0,0 @@ -// 
Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A (mostly) lock-free concurrent work-stealing deque -//! -//! This module contains an implementation of the Chase-Lev work stealing deque -//! described in "Dynamic Circular Work-Stealing Deque". The implementation is -//! heavily based on the pseudocode found in the paper. -//! -//! This implementation does not want to have the restriction of a garbage -//! collector for reclamation of buffers, and instead it uses a shared pool of -//! buffers. This shared pool is required for correctness in this -//! implementation. -//! -//! The only lock-synchronized portions of this deque are the buffer allocation -//! and deallocation portions. Otherwise all operations are lock-free. -//! -//! # Example -//! -//! use std::rt::deque::BufferPool; -//! -//! let mut pool = BufferPool::new(); -//! let (mut worker, mut stealer) = pool.deque(); -//! -//! // Only the worker may push/pop -//! worker.push(1); -//! worker.pop(); -//! -//! // Stealers take data from the other end of the deque -//! worker.push(1); -//! stealer.steal(); -//! -//! // Stealers can be cloned to have many stealers stealing in parallel -//! worker.push(1); -//! let mut stealer2 = stealer.clone(); -//! stealer2.steal(); - -// NB: the "buffer pool" strategy is not done for speed, but rather for -// correctness. For more info, see the comment on `swap_buffer` - -// XXX: all atomic operations in this module use a SeqCst ordering. 
That is -// probably overkill - -use cast; -use clone::Clone; -use iter::range; -use kinds::Send; -use libc; -use mem; -use ops::Drop; -use option::{Option, Some, None}; -use ptr; -use unstable::atomics::{AtomicInt, AtomicPtr, SeqCst}; -use unstable::sync::{UnsafeArc, Exclusive}; - -// Once the queue is less than 1/K full, then it will be downsized. Note that -// the deque requires that this number be less than 2. -static K: int = 4; - -// Minimum number of bits that a buffer size should be. No buffer will resize to -// under this value, and all deques will initially contain a buffer of this -// size. -// -// The size in question is 1 << MIN_BITS -static MIN_BITS: int = 7; - -struct Deque { - bottom: AtomicInt, - top: AtomicInt, - array: AtomicPtr>, - pool: BufferPool, -} - -/// Worker half of the work-stealing deque. This worker has exclusive access to -/// one side of the deque, and uses `push` and `pop` method to manipulate it. -/// -/// There may only be one worker per deque. -pub struct Worker { - priv deque: UnsafeArc>, -} - -/// The stealing half of the work-stealing deque. Stealers have access to the -/// opposite end of the deque from the worker, and they only have access to the -/// `steal` method. -pub struct Stealer { - priv deque: UnsafeArc>, -} - -/// When stealing some data, this is an enumeration of the possible outcomes. -#[deriving(Eq)] -pub enum Stolen { - /// The deque was empty at the time of stealing - Empty, - /// The stealer lost the race for stealing data, and a retry may return more - /// data. - Abort, - /// The stealer has successfully stolen some data. - Data(T), -} - -/// The allocation pool for buffers used by work-stealing deques. Right now this -/// structure is used for reclamation of memory after it is no longer in use by -/// deques. -/// -/// This data structure is protected by a mutex, but it is rarely used. Deques -/// will only use this structure when allocating a new buffer or deallocating a -/// previous one. 
-pub struct BufferPool { - priv pool: Exclusive<~[~Buffer]>, -} - -/// An internal buffer used by the chase-lev deque. This structure is actually -/// implemented as a circular buffer, and is used as the intermediate storage of -/// the data in the deque. -/// -/// This type is implemented with *T instead of ~[T] for two reasons: -/// -/// 1. There is nothing safe about using this buffer. This easily allows the -/// same value to be read twice in to rust, and there is nothing to -/// prevent this. The usage by the deque must ensure that one of the -/// values is forgotten. Furthermore, we only ever want to manually run -/// destructors for values in this buffer (on drop) because the bounds -/// are defined by the deque it's owned by. -/// -/// 2. We can certainly avoid bounds checks using *T instead of ~[T], although -/// LLVM is probably pretty good at doing this already. -struct Buffer { - storage: *T, - log_size: int, -} - -impl BufferPool { - /// Allocates a new buffer pool which in turn can be used to allocate new - /// deques. - pub fn new() -> BufferPool { - BufferPool { pool: Exclusive::new(~[]) } - } - - /// Allocates a new work-stealing deque which will send/receiving memory to - /// and from this buffer pool. 
- pub fn deque(&mut self) -> (Worker, Stealer) { - let (a, b) = UnsafeArc::new2(Deque::new(self.clone())); - (Worker { deque: a }, Stealer { deque: b }) - } - - fn alloc(&mut self, bits: int) -> ~Buffer { - unsafe { - self.pool.with(|pool| { - match pool.iter().position(|x| x.size() >= (1 << bits)) { - Some(i) => pool.remove(i), - None => ~Buffer::new(bits) - } - }) - } - } - - fn free(&mut self, buf: ~Buffer) { - unsafe { - let mut buf = Some(buf); - self.pool.with(|pool| { - let buf = buf.take_unwrap(); - match pool.iter().position(|v| v.size() > buf.size()) { - Some(i) => pool.insert(i, buf), - None => pool.push(buf), - } - }) - } - } -} - -impl Clone for BufferPool { - fn clone(&self) -> BufferPool { BufferPool { pool: self.pool.clone() } } -} - -impl Worker { - /// Pushes data onto the front of this work queue. - pub fn push(&mut self, t: T) { - unsafe { (*self.deque.get()).push(t) } - } - /// Pops data off the front of the work queue, returning `None` on an empty - /// queue. - pub fn pop(&mut self) -> Option { - unsafe { (*self.deque.get()).pop() } - } - - /// Gets access to the buffer pool that this worker is attached to. This can - /// be used to create more deques which share the same buffer pool as this - /// deque. - pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool { - unsafe { &mut (*self.deque.get()).pool } - } -} - -impl Stealer { - /// Steals work off the end of the queue (opposite of the worker's end) - pub fn steal(&mut self) -> Stolen { - unsafe { (*self.deque.get()).steal() } - } - - /// Gets access to the buffer pool that this stealer is attached to. This - /// can be used to create more deques which share the same buffer pool as - /// this deque. 
- pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool { - unsafe { &mut (*self.deque.get()).pool } - } -} - -impl Clone for Stealer { - fn clone(&self) -> Stealer { Stealer { deque: self.deque.clone() } } -} - -// Almost all of this code can be found directly in the paper so I'm not -// personally going to heavily comment what's going on here. - -impl Deque { - fn new(mut pool: BufferPool) -> Deque { - let buf = pool.alloc(MIN_BITS); - Deque { - bottom: AtomicInt::new(0), - top: AtomicInt::new(0), - array: AtomicPtr::new(unsafe { cast::transmute(buf) }), - pool: pool, - } - } - - unsafe fn push(&mut self, data: T) { - let mut b = self.bottom.load(SeqCst); - let t = self.top.load(SeqCst); - let mut a = self.array.load(SeqCst); - let size = b - t; - if size >= (*a).size() - 1 { - // You won't find this code in the chase-lev deque paper. This is - // alluded to in a small footnote, however. We always free a buffer - // when growing in order to prevent leaks. - a = self.swap_buffer(b, a, (*a).resize(b, t, 1)); - b = self.bottom.load(SeqCst); - } - (*a).put(b, data); - self.bottom.store(b + 1, SeqCst); - } - - unsafe fn pop(&mut self) -> Option { - let b = self.bottom.load(SeqCst); - let a = self.array.load(SeqCst); - let b = b - 1; - self.bottom.store(b, SeqCst); - let t = self.top.load(SeqCst); - let size = b - t; - if size < 0 { - self.bottom.store(t, SeqCst); - return None; - } - let data = (*a).get(b); - if size > 0 { - self.maybe_shrink(b, t); - return Some(data); - } - if self.top.compare_and_swap(t, t + 1, SeqCst) == t { - self.bottom.store(t + 1, SeqCst); - return Some(data); - } else { - self.bottom.store(t + 1, SeqCst); - cast::forget(data); // someone else stole this value - return None; - } - } - - unsafe fn steal(&mut self) -> Stolen { - let t = self.top.load(SeqCst); - let old = self.array.load(SeqCst); - let b = self.bottom.load(SeqCst); - let a = self.array.load(SeqCst); - let size = b - t; - if size <= 0 { return Empty } - if size % (*a).size() == 0 { 
- if a == old && t == self.top.load(SeqCst) { - return Empty - } - return Abort - } - let data = (*a).get(t); - if self.top.compare_and_swap(t, t + 1, SeqCst) == t { - Data(data) - } else { - cast::forget(data); // someone else stole this value - Abort - } - } - - unsafe fn maybe_shrink(&mut self, b: int, t: int) { - let a = self.array.load(SeqCst); - if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) { - self.swap_buffer(b, a, (*a).resize(b, t, -1)); - } - } - - // Helper routine not mentioned in the paper which is used in growing and - // shrinking buffers to swap in a new buffer into place. As a bit of a - // recap, the whole point that we need a buffer pool rather than just - // calling malloc/free directly is that stealers can continue using buffers - // after this method has called 'free' on it. The continued usage is simply - // a read followed by a forget, but we must make sure that the memory can - // continue to be read after we flag this buffer for reclamation. - unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer, - buf: Buffer) -> *mut Buffer { - let newbuf: *mut Buffer = cast::transmute(~buf); - self.array.store(newbuf, SeqCst); - let ss = (*newbuf).size(); - self.bottom.store(b + ss, SeqCst); - let t = self.top.load(SeqCst); - if self.top.compare_and_swap(t, t + ss, SeqCst) != t { - self.bottom.store(b, SeqCst); - } - self.pool.free(cast::transmute(old)); - return newbuf; - } -} - - -#[unsafe_destructor] -impl Drop for Deque { - fn drop(&mut self) { - let t = self.top.load(SeqCst); - let b = self.bottom.load(SeqCst); - let a = self.array.load(SeqCst); - // Free whatever is leftover in the dequeue, and then move the buffer - // back into the pool. 
- for i in range(t, b) { - let _: T = unsafe { (*a).get(i) }; - } - self.pool.free(unsafe { cast::transmute(a) }); - } -} - -impl Buffer { - unsafe fn new(log_size: int) -> Buffer { - let size = (1 << log_size) * mem::size_of::(); - let buffer = libc::malloc(size as libc::size_t); - assert!(!buffer.is_null()); - Buffer { - storage: buffer as *T, - log_size: log_size, - } - } - - fn size(&self) -> int { 1 << self.log_size } - - // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly - fn mask(&self) -> int { (1 << self.log_size) - 1 } - - // This does not protect against loading duplicate values of the same cell, - // nor does this clear out the contents contained within. Hence, this is a - // very unsafe method which the caller needs to treat specially in case a - // race is lost. - unsafe fn get(&self, i: int) -> T { - ptr::read_ptr(self.storage.offset(i & self.mask())) - } - - // Unsafe because this unsafely overwrites possibly uninitialized or - // initialized data. - unsafe fn put(&mut self, i: int, t: T) { - let ptr = self.storage.offset(i & self.mask()); - ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1); - cast::forget(t); - } - - // Again, unsafe because this has incredibly dubious ownership violations. - // It is assumed that this buffer is immediately dropped. - unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer { - let mut buf = Buffer::new(self.log_size + delta); - for i in range(t, b) { - buf.put(i, self.get(i)); - } - return buf; - } -} - -#[unsafe_destructor] -impl Drop for Buffer { - fn drop(&mut self) { - // It is assumed that all buffers are empty on drop. 
- unsafe { libc::free(self.storage as *libc::c_void) } - } -} - -#[cfg(test)] -mod tests { - use prelude::*; - use super::{Data, BufferPool, Abort, Empty, Worker, Stealer}; - - use cast; - use rt::thread::Thread; - use rand; - use rand::Rng; - use unstable::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst, - AtomicUint, INIT_ATOMIC_UINT}; - use vec; - - #[test] - fn smoke() { - let mut pool = BufferPool::new(); - let (mut w, mut s) = pool.deque(); - assert_eq!(w.pop(), None); - assert_eq!(s.steal(), Empty); - w.push(1); - assert_eq!(w.pop(), Some(1)); - w.push(1); - assert_eq!(s.steal(), Data(1)); - w.push(1); - assert_eq!(s.clone().steal(), Data(1)); - } - - #[test] - fn stealpush() { - static AMT: int = 100000; - let mut pool = BufferPool::::new(); - let (mut w, s) = pool.deque(); - let t = do Thread::start { - let mut s = s; - let mut left = AMT; - while left > 0 { - match s.steal() { - Data(i) => { - assert_eq!(i, 1); - left -= 1; - } - Abort | Empty => {} - } - } - }; - - for _ in range(0, AMT) { - w.push(1); - } - - t.join(); - } - - #[test] - fn stealpush_large() { - static AMT: int = 100000; - let mut pool = BufferPool::<(int, int)>::new(); - let (mut w, s) = pool.deque(); - let t = do Thread::start { - let mut s = s; - let mut left = AMT; - while left > 0 { - match s.steal() { - Data((1, 10)) => { left -= 1; } - Data(..) => fail!(), - Abort | Empty => {} - } - } - }; - - for _ in range(0, AMT) { - w.push((1, 10)); - } - - t.join(); - } - - fn stampede(mut w: Worker<~int>, s: Stealer<~int>, - nthreads: int, amt: uint) { - for _ in range(0, amt) { - w.push(~20); - } - let mut remaining = AtomicUint::new(amt); - let unsafe_remaining: *mut AtomicUint = &mut remaining; - - let threads = range(0, nthreads).map(|_| { - let s = s.clone(); - do Thread::start { - unsafe { - let mut s = s; - while (*unsafe_remaining).load(SeqCst) > 0 { - match s.steal() { - Data(~20) => { - (*unsafe_remaining).fetch_sub(1, SeqCst); - } - Data(..) 
=> fail!(), - Abort | Empty => {} - } - } - } - } - }).to_owned_vec(); - - while remaining.load(SeqCst) > 0 { - match w.pop() { - Some(~20) => { remaining.fetch_sub(1, SeqCst); } - Some(..) => fail!(), - None => {} - } - } - - for thread in threads.move_iter() { - thread.join(); - } - } - - #[test] - fn run_stampede() { - let mut pool = BufferPool::<~int>::new(); - let (w, s) = pool.deque(); - stampede(w, s, 8, 10000); - } - - #[test] - fn many_stampede() { - static AMT: uint = 4; - let mut pool = BufferPool::<~int>::new(); - let threads = range(0, AMT).map(|_| { - let (w, s) = pool.deque(); - do Thread::start { - stampede(w, s, 4, 10000); - } - }).to_owned_vec(); - - for thread in threads.move_iter() { - thread.join(); - } - } - - #[test] - fn stress() { - static AMT: int = 100000; - static NTHREADS: int = 8; - static mut DONE: AtomicBool = INIT_ATOMIC_BOOL; - static mut HITS: AtomicUint = INIT_ATOMIC_UINT; - let mut pool = BufferPool::::new(); - let (mut w, s) = pool.deque(); - - let threads = range(0, NTHREADS).map(|_| { - let s = s.clone(); - do Thread::start { - unsafe { - let mut s = s; - loop { - match s.steal() { - Data(2) => { HITS.fetch_add(1, SeqCst); } - Data(..) => fail!(), - _ if DONE.load(SeqCst) => break, - _ => {} - } - } - } - } - }).to_owned_vec(); - - let mut rng = rand::task_rng(); - let mut expected = 0; - while expected < AMT { - if rng.gen_range(0, 3) == 2 { - match w.pop() { - None => {} - Some(2) => unsafe { HITS.fetch_add(1, SeqCst); }, - Some(_) => fail!(), - } - } else { - expected += 1; - w.push(2); - } - } - - unsafe { - while HITS.load(SeqCst) < AMT as uint { - match w.pop() { - None => {} - Some(2) => { HITS.fetch_add(1, SeqCst); }, - Some(_) => fail!(), - } - } - DONE.store(true, SeqCst); - } - - for thread in threads.move_iter() { - thread.join(); - } - - assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint); - } - - #[test] - #[ignore(cfg(windows))] // apparently windows scheduling is weird? 
- fn no_starvation() { - static AMT: int = 10000; - static NTHREADS: int = 4; - static mut DONE: AtomicBool = INIT_ATOMIC_BOOL; - let mut pool = BufferPool::<(int, uint)>::new(); - let (mut w, s) = pool.deque(); - - let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| { - let s = s.clone(); - let unique_box = ~AtomicUint::new(0); - let thread_box = unsafe { - *cast::transmute::<&~AtomicUint,**mut AtomicUint>(&unique_box) - }; - (do Thread::start { - unsafe { - let mut s = s; - loop { - match s.steal() { - Data((1, 2)) => { - (*thread_box).fetch_add(1, SeqCst); - } - Data(..) => fail!(), - _ if DONE.load(SeqCst) => break, - _ => {} - } - } - } - }, unique_box) - })); - - let mut rng = rand::task_rng(); - let mut myhit = false; - let mut iter = 0; - 'outer: loop { - for _ in range(0, rng.gen_range(0, AMT)) { - if !myhit && rng.gen_range(0, 3) == 2 { - match w.pop() { - None => {} - Some((1, 2)) => myhit = true, - Some(_) => fail!(), - } - } else { - w.push((1, 2)); - } - } - iter += 1; - - debug!("loop iteration {}", iter); - for (i, slot) in hits.iter().enumerate() { - let amt = slot.load(SeqCst); - debug!("thread {}: {}", i, amt); - if amt == 0 { continue 'outer; } - } - if myhit { - break - } - } - - unsafe { DONE.store(true, SeqCst); } - - for thread in threads.move_iter() { - thread.join(); - } - } -} - diff --git a/src/libstd/rt/mpmc_bounded_queue.rs b/src/libstd/rt/mpmc_bounded_queue.rs deleted file mode 100644 index 25a3ba8ab48..00000000000 --- a/src/libstd/rt/mpmc_bounded_queue.rs +++ /dev/null @@ -1,209 +0,0 @@ -/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Dmitry Vyukov. 
- */ - -// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue - -use unstable::sync::UnsafeArc; -use unstable::atomics::{AtomicUint,Relaxed,Release,Acquire}; -use option::*; -use vec; -use clone::Clone; -use kinds::Send; -use num::{Exponential,Algebraic,Round}; - -struct Node { - sequence: AtomicUint, - value: Option, -} - -struct State { - pad0: [u8, ..64], - buffer: ~[Node], - mask: uint, - pad1: [u8, ..64], - enqueue_pos: AtomicUint, - pad2: [u8, ..64], - dequeue_pos: AtomicUint, - pad3: [u8, ..64], -} - -pub struct Queue { - priv state: UnsafeArc>, -} - -impl State { - fn with_capacity(capacity: uint) -> State { - let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 { - if capacity < 2 { - 2u - } else { - // use next power of 2 as capacity - 2f64.pow(&((capacity as f64).log2().ceil())) as uint - } - } else { - capacity - }; - let buffer = vec::from_fn(capacity, |i:uint| { - Node{sequence:AtomicUint::new(i),value:None} - }); - State{ - pad0: [0, ..64], - buffer: buffer, - mask: capacity-1, - pad1: [0, ..64], - enqueue_pos: AtomicUint::new(0), - pad2: [0, ..64], - dequeue_pos: AtomicUint::new(0), - pad3: [0, ..64], - } - } - - fn push(&mut self, value: T) -> bool { - let mask = self.mask; - let mut pos = self.enqueue_pos.load(Relaxed); - loop { - let node = &mut self.buffer[pos & mask]; - let seq = node.sequence.load(Acquire); - let diff: int = seq as int - pos as int; - - if diff == 0 { - let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed); - if enqueue_pos == pos { - node.value = Some(value); - node.sequence.store(pos+1, Release); - break - } else { - pos = enqueue_pos; - } - } else if (diff < 0) { - return false - } else { - pos = self.enqueue_pos.load(Relaxed); - } - } - true - } - - fn pop(&mut self) -> Option { - let mask = self.mask; - let mut pos = self.dequeue_pos.load(Relaxed); - loop { - let node = &mut self.buffer[pos & mask]; - let seq = node.sequence.load(Acquire); - let diff: int = seq as 
int - (pos + 1) as int; - if diff == 0 { - let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed); - if dequeue_pos == pos { - let value = node.value.take(); - node.sequence.store(pos + mask + 1, Release); - return value - } else { - pos = dequeue_pos; - } - } else if diff < 0 { - return None - } else { - pos = self.dequeue_pos.load(Relaxed); - } - } - } -} - -impl Queue { - pub fn with_capacity(capacity: uint) -> Queue { - Queue{ - state: UnsafeArc::new(State::with_capacity(capacity)) - } - } - - pub fn push(&mut self, value: T) -> bool { - unsafe { (*self.state.get()).push(value) } - } - - pub fn pop(&mut self) -> Option { - unsafe { (*self.state.get()).pop() } - } -} - -impl Clone for Queue { - fn clone(&self) -> Queue { - Queue { - state: self.state.clone() - } - } -} - -#[cfg(test)] -mod tests { - use prelude::*; - use option::*; - use task; - use super::Queue; - - #[test] - fn test() { - let nthreads = 8u; - let nmsgs = 1000u; - let mut q = Queue::with_capacity(nthreads*nmsgs); - assert_eq!(None, q.pop()); - - for _ in range(0, nthreads) { - let q = q.clone(); - do task::spawn_sched(task::SingleThreaded) { - let mut q = q; - for i in range(0, nmsgs) { - assert!(q.push(i)); - } - } - } - - let mut completion_ports = ~[]; - for _ in range(0, nthreads) { - let (completion_port, completion_chan) = Chan::new(); - completion_ports.push(completion_port); - let q = q.clone(); - do task::spawn_sched(task::SingleThreaded) { - let mut q = q; - let mut i = 0u; - loop { - match q.pop() { - None => {}, - Some(_) => { - i += 1; - if i == nmsgs { break } - } - } - } - completion_chan.send(i); - } - } - - for completion_port in completion_ports.mut_iter() { - assert_eq!(nmsgs, completion_port.recv()); - } - } -} diff --git a/src/libstd/rt/mpsc_queue.rs b/src/libstd/rt/mpsc_queue.rs deleted file mode 100644 index d575028af70..00000000000 --- a/src/libstd/rt/mpsc_queue.rs +++ /dev/null @@ -1,215 +0,0 @@ -/* Copyright (c) 2010-2011 Dmitry Vyukov. 
All rights reserved. - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Dmitry Vyukov. - */ - -//! A mostly lock-free multi-producer, single consumer queue. 
- -// http://www.1024cores.net/home/lock-free-algorithms -// /queues/non-intrusive-mpsc-node-based-queue - -use cast; -use clone::Clone; -use kinds::Send; -use ops::Drop; -use option::{Option, None, Some}; -use unstable::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed}; -use unstable::sync::UnsafeArc; - -pub enum PopResult { - /// Some data has been popped - Data(T), - /// The queue is empty - Empty, - /// The queue is in an inconsistent state. Popping data should succeed, but - /// some pushers have yet to make enough progress in order allow a pop to - /// succeed. It is recommended that a pop() occur "in the near future" in - /// order to see if the sender has made progress or not - Inconsistent, -} - -struct Node { - next: AtomicPtr>, - value: Option, -} - -struct State { - head: AtomicPtr>, - tail: *mut Node, - packet: P, -} - -pub struct Consumer { - priv state: UnsafeArc>, -} - -pub struct Producer { - priv state: UnsafeArc>, -} - -impl Clone for Producer { - fn clone(&self) -> Producer { - Producer { state: self.state.clone() } - } -} - -pub fn queue(p: P) -> (Consumer, Producer) { - unsafe { - let (a, b) = UnsafeArc::new2(State::new(p)); - (Consumer { state: a }, Producer { state: b }) - } -} - -impl Node { - unsafe fn new(v: Option) -> *mut Node { - cast::transmute(~Node { - next: AtomicPtr::new(0 as *mut Node), - value: v, - }) - } -} - -impl State { - pub unsafe fn new(p: P) -> State { - let stub = Node::new(None); - State { - head: AtomicPtr::new(stub), - tail: stub, - packet: p, - } - } - - unsafe fn push(&mut self, t: T) { - let n = Node::new(Some(t)); - let prev = self.head.swap(n, AcqRel); - (*prev).next.store(n, Release); - } - - unsafe fn pop(&mut self) -> PopResult { - let tail = self.tail; - let next = (*tail).next.load(Acquire); - - if !next.is_null() { - self.tail = next; - assert!((*tail).value.is_none()); - assert!((*next).value.is_some()); - let ret = (*next).value.take_unwrap(); - let _: ~Node = cast::transmute(tail); - return 
Data(ret); - } - - if self.head.load(Acquire) == tail {Empty} else {Inconsistent} - } - - unsafe fn is_empty(&mut self) -> bool { - return (*self.tail).next.load(Acquire).is_null(); - } -} - -#[unsafe_destructor] -impl Drop for State { - fn drop(&mut self) { - unsafe { - let mut cur = self.tail; - while !cur.is_null() { - let next = (*cur).next.load(Relaxed); - let _: ~Node = cast::transmute(cur); - cur = next; - } - } - } -} - -impl Producer { - pub fn push(&mut self, value: T) { - unsafe { (*self.state.get()).push(value) } - } - pub fn is_empty(&self) -> bool { - unsafe{ (*self.state.get()).is_empty() } - } - pub unsafe fn packet(&self) -> *mut P { - &mut (*self.state.get()).packet as *mut P - } -} - -impl Consumer { - pub fn pop(&mut self) -> PopResult { - unsafe { (*self.state.get()).pop() } - } - pub fn casual_pop(&mut self) -> Option { - match self.pop() { - Data(t) => Some(t), - Empty | Inconsistent => None, - } - } - pub unsafe fn packet(&self) -> *mut P { - &mut (*self.state.get()).packet as *mut P - } -} - -#[cfg(test)] -mod tests { - use prelude::*; - - use task; - use super::{queue, Data, Empty, Inconsistent}; - - #[test] - fn test_full() { - let (_, mut p) = queue(()); - p.push(~1); - p.push(~2); - } - - #[test] - fn test() { - let nthreads = 8u; - let nmsgs = 1000u; - let (mut c, p) = queue(()); - match c.pop() { - Empty => {} - Inconsistent | Data(..) => fail!() - } - - for _ in range(0, nthreads) { - let q = p.clone(); - do task::spawn_sched(task::SingleThreaded) { - let mut q = q; - for i in range(0, nmsgs) { - q.push(i); - } - } - } - - let mut i = 0u; - while i < nthreads * nmsgs { - match c.pop() { - Empty | Inconsistent => {}, - Data(_) => { i += 1 } - } - } - } -} - diff --git a/src/libstd/rt/spsc_queue.rs b/src/libstd/rt/spsc_queue.rs deleted file mode 100644 index f14533d726a..00000000000 --- a/src/libstd/rt/spsc_queue.rs +++ /dev/null @@ -1,296 +0,0 @@ -/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. 
- * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are met: - * - * 1. Redistributions of source code must retain the above copyright notice, - * this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT - * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF - * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE - * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF - * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - * The views and conclusions contained in the software and documentation are - * those of the authors and should not be interpreted as representing official - * policies, either expressed or implied, of Dmitry Vyukov. - */ - -// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue -use cast; -use kinds::Send; -use ops::Drop; -use option::{Some, None, Option}; -use unstable::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release}; -use unstable::sync::UnsafeArc; - -// Node within the linked list queue of messages to send -struct Node { - // XXX: this could be an uninitialized T if we're careful enough, and - // that would reduce memory usage (and be a bit faster). 
- // is it worth it? - value: Option, // nullable for re-use of nodes - next: AtomicPtr>, // next node in the queue -} - -// The producer/consumer halves both need access to the `tail` field, and if -// they both have access to that we may as well just give them both access -// to this whole structure. -struct State { - // consumer fields - tail: *mut Node, // where to pop from - tail_prev: AtomicPtr>, // where to pop from - - // producer fields - head: *mut Node, // where to push to - first: *mut Node, // where to get new nodes from - tail_copy: *mut Node, // between first/tail - - // Cache maintenance fields. Additions and subtractions are stored - // separately in order to allow them to use nonatomic addition/subtraction. - cache_bound: uint, - cache_additions: AtomicUint, - cache_subtractions: AtomicUint, - - packet: P, -} - -pub struct Producer { - priv state: UnsafeArc>, -} - -pub struct Consumer { - priv state: UnsafeArc>, -} - -pub fn queue(bound: uint, - p: P) -> (Consumer, Producer) -{ - let n1 = Node::new(); - let n2 = Node::new(); - unsafe { (*n1).next.store(n2, Relaxed) } - let state = State { - tail: n2, - tail_prev: AtomicPtr::new(n1), - head: n2, - first: n1, - tail_copy: n1, - cache_bound: bound, - cache_additions: AtomicUint::new(0), - cache_subtractions: AtomicUint::new(0), - packet: p, - }; - let (arc1, arc2) = UnsafeArc::new2(state); - (Consumer { state: arc1 }, Producer { state: arc2 }) -} - -impl Node { - fn new() -> *mut Node { - unsafe { - cast::transmute(~Node { - value: None, - next: AtomicPtr::new(0 as *mut Node), - }) - } - } -} - -impl Producer { - pub fn push(&mut self, t: T) { - unsafe { (*self.state.get()).push(t) } - } - pub fn is_empty(&self) -> bool { - unsafe { (*self.state.get()).is_empty() } - } - pub unsafe fn packet(&self) -> *mut P { - &mut (*self.state.get()).packet as *mut P - } -} - -impl Consumer { - pub fn pop(&mut self) -> Option { - unsafe { (*self.state.get()).pop() } - } - pub unsafe fn packet(&self) -> *mut P { - 
&mut (*self.state.get()).packet as *mut P - } -} - -impl State { - // remember that there is only one thread executing `push` (and only one - // thread executing `pop`) - unsafe fn push(&mut self, t: T) { - // Acquire a node (which either uses a cached one or allocates a new - // one), and then append this to the 'head' node. - let n = self.alloc(); - assert!((*n).value.is_none()); - (*n).value = Some(t); - (*n).next.store(0 as *mut Node, Relaxed); - (*self.head).next.store(n, Release); - self.head = n; - } - - unsafe fn alloc(&mut self) -> *mut Node { - // First try to see if we can consume the 'first' node for our uses. - // We try to avoid as many atomic instructions as possible here, so - // the addition to cache_subtractions is not atomic (plus we're the - // only one subtracting from the cache). - if self.first != self.tail_copy { - if self.cache_bound > 0 { - let b = self.cache_subtractions.load(Relaxed); - self.cache_subtractions.store(b + 1, Relaxed); - } - let ret = self.first; - self.first = (*ret).next.load(Relaxed); - return ret; - } - // If the above fails, then update our copy of the tail and try - // again. - self.tail_copy = self.tail_prev.load(Acquire); - if self.first != self.tail_copy { - if self.cache_bound > 0 { - let b = self.cache_subtractions.load(Relaxed); - self.cache_subtractions.store(b + 1, Relaxed); - } - let ret = self.first; - self.first = (*ret).next.load(Relaxed); - return ret; - } - // If all of that fails, then we have to allocate a new node - // (there's nothing in the node cache). - Node::new() - } - - // remember that there is only one thread executing `pop` (and only one - // thread executing `push`) - unsafe fn pop(&mut self) -> Option { - // The `tail` node is not actually a used node, but rather a - // sentinel from where we should start popping from. Hence, look at - // tail's next field and see if we can use it. If we do a pop, then - // the current tail node is a candidate for going into the cache. 
- let tail = self.tail; - let next = (*tail).next.load(Acquire); - if next.is_null() { return None } - assert!((*next).value.is_some()); - let ret = (*next).value.take(); - - self.tail = next; - if self.cache_bound == 0 { - self.tail_prev.store(tail, Release); - } else { - // XXX: this is dubious with overflow. - let additions = self.cache_additions.load(Relaxed); - let subtractions = self.cache_subtractions.load(Relaxed); - let size = additions - subtractions; - - if size < self.cache_bound { - self.tail_prev.store(tail, Release); - self.cache_additions.store(additions + 1, Relaxed); - } else { - (*self.tail_prev.load(Relaxed)).next.store(next, Relaxed); - // We have successfully erased all references to 'tail', so - // now we can safely drop it. - let _: ~Node = cast::transmute(tail); - } - } - return ret; - } - - unsafe fn is_empty(&self) -> bool { - let tail = self.tail; - let next = (*tail).next.load(Acquire); - return next.is_null(); - } -} - -#[unsafe_destructor] -impl Drop for State { - fn drop(&mut self) { - unsafe { - let mut cur = self.first; - while !cur.is_null() { - let next = (*cur).next.load(Relaxed); - let _n: ~Node = cast::transmute(cur); - cur = next; - } - } - } -} - -#[cfg(test)] -mod test { - use prelude::*; - use super::queue; - use task; - - #[test] - fn smoke() { - let (mut c, mut p) = queue(0, ()); - p.push(1); - p.push(2); - assert_eq!(c.pop(), Some(1)); - assert_eq!(c.pop(), Some(2)); - assert_eq!(c.pop(), None); - p.push(3); - p.push(4); - assert_eq!(c.pop(), Some(3)); - assert_eq!(c.pop(), Some(4)); - assert_eq!(c.pop(), None); - } - - #[test] - fn drop_full() { - let (_, mut p) = queue(0, ()); - p.push(~1); - p.push(~2); - } - - #[test] - fn smoke_bound() { - let (mut c, mut p) = queue(1, ()); - p.push(1); - p.push(2); - assert_eq!(c.pop(), Some(1)); - assert_eq!(c.pop(), Some(2)); - assert_eq!(c.pop(), None); - p.push(3); - p.push(4); - assert_eq!(c.pop(), Some(3)); - assert_eq!(c.pop(), Some(4)); - assert_eq!(c.pop(), None); - } - - 
#[test] - fn stress() { - stress_bound(0); - stress_bound(1); - - fn stress_bound(bound: uint) { - let (c, mut p) = queue(bound, ()); - do task::spawn_sched(task::SingleThreaded) { - let mut c = c; - for _ in range(0, 100000) { - loop { - match c.pop() { - Some(1) => break, - Some(_) => fail!(), - None => {} - } - } - } - } - for _ in range(0, 100000) { - p.push(1); - } - } - } -} diff --git a/src/libstd/sync/arc.rs b/src/libstd/sync/arc.rs new file mode 100644 index 00000000000..7632ec6cf29 --- /dev/null +++ b/src/libstd/sync/arc.rs @@ -0,0 +1,153 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Atomically reference counted data +//! +//! This modules contains the implementation of an atomically reference counted +//! pointer for the purpose of sharing data between tasks. This is obviously a +//! very unsafe primitive to use, but it has its use cases when implementing +//! concurrent data structures and similar tasks. +//! +//! Great care must be taken to ensure that data races do not arise through the +//! usage of `UnsafeArc`, and this often requires some form of external +//! synchronization. The only guarantee provided to you by this class is that +//! the underlying data will remain valid (not free'd) so long as the reference +//! count is greater than one. + +use cast; +use clone::Clone; +use kinds::Send; +use ops::Drop; +use ptr::RawPtr; +use sync::atomics::{AtomicUint, SeqCst, Relaxed, Acquire}; +use vec; + +/// An atomically reference counted pointer. +/// +/// Enforces no shared-memory safety. 
+#[unsafe_no_drop_flag] +pub struct UnsafeArc { + priv data: *mut ArcData, +} + +struct ArcData { + count: AtomicUint, + data: T, +} + +unsafe fn new_inner(data: T, refcount: uint) -> *mut ArcData { + let data = ~ArcData { count: AtomicUint::new(refcount), data: data }; + cast::transmute(data) +} + +impl UnsafeArc { + /// Creates a new `UnsafeArc` which wraps the given data. + pub fn new(data: T) -> UnsafeArc { + unsafe { UnsafeArc { data: new_inner(data, 1) } } + } + + /// As new(), but returns an extra pre-cloned handle. + pub fn new2(data: T) -> (UnsafeArc, UnsafeArc) { + unsafe { + let ptr = new_inner(data, 2); + (UnsafeArc { data: ptr }, UnsafeArc { data: ptr }) + } + } + + /// As new(), but returns a vector of as many pre-cloned handles as + /// requested. + pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc] { + unsafe { + if num_handles == 0 { + ~[] // need to free data here + } else { + let ptr = new_inner(data, num_handles); + vec::from_fn(num_handles, |_| UnsafeArc { data: ptr }) + } + } + } + + /// Gets a pointer to the inner shared data. Note that care must be taken to + /// ensure that the outer `UnsafeArc` does not fall out of scope while this + /// pointer is in use, otherwise it could possibly contain a use-after-free. + #[inline] + pub fn get(&self) -> *mut T { + unsafe { + assert!((*self.data).count.load(Relaxed) > 0); + return &mut (*self.data).data as *mut T; + } + } + + /// Gets an immutable pointer to the inner shared data. This has the same + /// caveats as the `get` method. + #[inline] + pub fn get_immut(&self) -> *T { + unsafe { + assert!((*self.data).count.load(Relaxed) > 0); + return &(*self.data).data as *T; + } + } +} + +impl Clone for UnsafeArc { + fn clone(&self) -> UnsafeArc { + unsafe { + // This barrier might be unnecessary, but I'm not sure... 
+ let old_count = (*self.data).count.fetch_add(1, Acquire); + assert!(old_count >= 1); + return UnsafeArc { data: self.data }; + } + } +} + +#[unsafe_destructor] +impl Drop for UnsafeArc{ + fn drop(&mut self) { + unsafe { + // Happens when destructing an unwrapper's handle and from + // `#[unsafe_no_drop_flag]` + if self.data.is_null() { + return + } + // Must be acquire+release, not just release, to make sure this + // doesn't get reordered to after the unwrapper pointer load. + let old_count = (*self.data).count.fetch_sub(1, SeqCst); + assert!(old_count >= 1); + if old_count == 1 { + let _: ~ArcData = cast::transmute(self.data); + } + } + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + use super::UnsafeArc; + use task; + use mem::size_of; + + #[test] + fn test_size() { + assert_eq!(size_of::>(), size_of::<*[int, ..10]>()); + } + + #[test] + fn arclike_newN() { + // Tests that the many-refcounts-at-once constructors don't leak. + let _ = UnsafeArc::new2(~~"hello"); + let x = UnsafeArc::newN(~~"hello", 0); + assert_eq!(x.len(), 0) + let x = UnsafeArc::newN(~~"hello", 1); + assert_eq!(x.len(), 1) + let x = UnsafeArc::newN(~~"hello", 10); + assert_eq!(x.len(), 10) + } +} diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs new file mode 100644 index 00000000000..bc9d99c0f37 --- /dev/null +++ b/src/libstd/sync/atomics.rs @@ -0,0 +1,603 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! + * Atomic types + * + * Basic atomic types supporting atomic operations. Each method takes an + * `Ordering` which represents the strength of the memory barrier for that + * operation. 
These orderings are the same as C++11 atomic orderings + * [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync] + * + * All atomic types are a single word in size. + */ + +#[allow(missing_doc)]; + +use unstable::intrinsics; +use cast; +use option::{Option,Some,None}; +use libc::c_void; +use ops::Drop; +use util::NonCopyable; + +/** + * A simple atomic flag, that can be set and cleared. The most basic atomic type. + */ +pub struct AtomicFlag { + priv v: int, + priv nocopy: NonCopyable +} + +/** + * An atomic boolean type. + */ +pub struct AtomicBool { + priv v: uint, + priv nocopy: NonCopyable +} + +/** + * A signed atomic integer type, supporting basic atomic arithmetic operations + */ +pub struct AtomicInt { + priv v: int, + priv nocopy: NonCopyable +} + +/** + * An unsigned atomic integer type, supporting basic atomic arithmetic operations + */ +pub struct AtomicUint { + priv v: uint, + priv nocopy: NonCopyable +} + +/** + * An unsafe atomic pointer. Only supports basic atomic operations + */ +pub struct AtomicPtr { + priv p: *mut T, + priv nocopy: NonCopyable +} + +/** + * An owned atomic pointer. Ensures that only a single reference to the data is held at any time. 
+ */ +#[unsafe_no_drop_flag] +pub struct AtomicOption { + priv p: *mut c_void +} + +pub enum Ordering { + Relaxed, + Release, + Acquire, + AcqRel, + SeqCst +} + +pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nocopy: NonCopyable }; +pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nocopy: NonCopyable }; +pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nocopy: NonCopyable }; +pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nocopy: NonCopyable }; + +impl AtomicFlag { + + pub fn new() -> AtomicFlag { + AtomicFlag { v: 0, nocopy: NonCopyable } + } + + /** + * Clears the atomic flag + */ + #[inline] + pub fn clear(&mut self, order: Ordering) { + unsafe {atomic_store(&mut self.v, 0, order)} + } + + /** + * Sets the flag if it was previously unset, returns the previous value of the + * flag. + */ + #[inline] + pub fn test_and_set(&mut self, order: Ordering) -> bool { + unsafe { atomic_compare_and_swap(&mut self.v, 0, 1, order) > 0 } + } +} + +impl AtomicBool { + pub fn new(v: bool) -> AtomicBool { + AtomicBool { v: if v { 1 } else { 0 }, nocopy: NonCopyable } + } + + #[inline] + pub fn load(&self, order: Ordering) -> bool { + unsafe { atomic_load(&self.v, order) > 0 } + } + + #[inline] + pub fn store(&mut self, val: bool, order: Ordering) { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_swap(&mut self.v, val, order) > 0 } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { + let old = if old { 1 } else { 0 }; + let new = if new { 1 } else { 0 }; + + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_and(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { 
atomic_and(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_nand(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_nand(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_or(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_or(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_xor(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_xor(&mut self.v, val, order) > 0 } + } +} + +impl AtomicInt { + pub fn new(v: int) -> AtomicInt { + AtomicInt { v:v, nocopy: NonCopyable } + } + + #[inline] + pub fn load(&self, order: Ordering) -> int { + unsafe { atomic_load(&self.v, order) } + } + + #[inline] + pub fn store(&mut self, val: int, order: Ordering) { + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + /// Returns the old value (like __sync_fetch_and_add). + #[inline] + pub fn fetch_add(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_add(&mut self.v, val, order) } + } + + /// Returns the old value (like __sync_fetch_and_sub). 
+ #[inline] + pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicUint { + pub fn new(v: uint) -> AtomicUint { + AtomicUint { v:v, nocopy: NonCopyable } + } + + #[inline] + pub fn load(&self, order: Ordering) -> uint { + unsafe { atomic_load(&self.v, order) } + } + + #[inline] + pub fn store(&mut self, val: uint, order: Ordering) { + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + /// Returns the old value (like __sync_fetch_and_add). + #[inline] + pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_add(&mut self.v, val, order) } + } + + /// Returns the old value (like __sync_fetch_and_sub).. 
+ #[inline] + pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicPtr { + pub fn new(p: *mut T) -> AtomicPtr { + AtomicPtr { p:p, nocopy: NonCopyable } + } + + #[inline] + pub fn load(&self, order: Ordering) -> *mut T { + unsafe { atomic_load(&self.p, order) } + } + + #[inline] + pub fn store(&mut self, ptr: *mut T, order: Ordering) { + unsafe { atomic_store(&mut self.p, ptr, order); } + } + + #[inline] + pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(&mut self.p, ptr, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_compare_and_swap(&mut self.p, old, new, order) } + } +} + +impl AtomicOption { + pub fn new(p: ~T) -> AtomicOption { + unsafe { + AtomicOption { + p: cast::transmute(p) + } + } + } + + pub fn empty() -> AtomicOption { + unsafe { + AtomicOption { + p: cast::transmute(0) + } + } + } + + #[inline] + pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { + unsafe { + let val = cast::transmute(val); + + let p = atomic_swap(&mut self.p, val, order); + let pv : &uint = cast::transmute(&p); + + if *pv == 0 { + None + } else { + Some(cast::transmute(p)) + } + } + } + + #[inline] + pub fn take(&mut self, order: Ordering) -> Option<~T> { + unsafe { + self.swap(cast::transmute(0), order) + } + } + + /// A compare-and-swap. Succeeds if the option is 'None' and returns 'None' + /// if so. If the option was already 'Some', returns 'Some' of the rejected + /// value. 
+ #[inline] + pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> { + unsafe { + let val = cast::transmute(val); + let expected = cast::transmute(0); + let oldval = atomic_compare_and_swap(&mut self.p, expected, val, order); + if oldval == expected { + None + } else { + Some(cast::transmute(val)) + } + } + } + + /// Be careful: The caller must have some external method of ensuring the + /// result does not get invalidated by another task after this returns. + #[inline] + pub fn is_empty(&mut self, order: Ordering) -> bool { + unsafe { atomic_load(&self.p, order) == cast::transmute(0) } + } +} + +#[unsafe_destructor] +impl Drop for AtomicOption { + fn drop(&mut self) { + let _ = self.take(SeqCst); + } +} + +#[inline] +pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + match order { + Release => intrinsics::atomic_store_rel(dst, val), + Relaxed => intrinsics::atomic_store_relaxed(dst, val), + _ => intrinsics::atomic_store(dst, val) + } +} + +#[inline] +pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { + let dst = cast::transmute(dst); + + cast::transmute(match order { + Acquire => intrinsics::atomic_load_acq(dst), + Relaxed => intrinsics::atomic_load_relaxed(dst), + _ => intrinsics::atomic_load(dst) + }) +} + +#[inline] +pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + cast::transmute(match order { + Acquire => intrinsics::atomic_xchg_acq(dst, val), + Release => intrinsics::atomic_xchg_rel(dst, val), + AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), + Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), + _ => intrinsics::atomic_xchg(dst, val) + }) +} + +/// Returns the old value (like __sync_fetch_and_add). 
+#[inline] +pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + cast::transmute(match order { + Acquire => intrinsics::atomic_xadd_acq(dst, val), + Release => intrinsics::atomic_xadd_rel(dst, val), + AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), + Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), + _ => intrinsics::atomic_xadd(dst, val) + }) +} + +/// Returns the old value (like __sync_fetch_and_sub). +#[inline] +pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + cast::transmute(match order { + Acquire => intrinsics::atomic_xsub_acq(dst, val), + Release => intrinsics::atomic_xsub_rel(dst, val), + AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), + Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), + _ => intrinsics::atomic_xsub(dst, val) + }) +} + +#[inline] +pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { + let dst = cast::transmute(dst); + let old = cast::transmute(old); + let new = cast::transmute(new); + + cast::transmute(match order { + Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), + Release => intrinsics::atomic_cxchg_rel(dst, old, new), + AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), + Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), + _ => intrinsics::atomic_cxchg(dst, old, new), + }) +} + +#[inline] +pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + cast::transmute(match order { + Acquire => intrinsics::atomic_and_acq(dst, val), + Release => intrinsics::atomic_and_rel(dst, val), + AcqRel => intrinsics::atomic_and_acqrel(dst, val), + Relaxed => intrinsics::atomic_and_relaxed(dst, val), + _ => intrinsics::atomic_and(dst, val) + }) +} + + +#[inline] +pub unsafe fn atomic_nand(dst: &mut T, val: T, order: 
Ordering) -> T { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + cast::transmute(match order { + Acquire => intrinsics::atomic_nand_acq(dst, val), + Release => intrinsics::atomic_nand_rel(dst, val), + AcqRel => intrinsics::atomic_nand_acqrel(dst, val), + Relaxed => intrinsics::atomic_nand_relaxed(dst, val), + _ => intrinsics::atomic_nand(dst, val) + }) +} + + +#[inline] +pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + cast::transmute(match order { + Acquire => intrinsics::atomic_or_acq(dst, val), + Release => intrinsics::atomic_or_rel(dst, val), + AcqRel => intrinsics::atomic_or_acqrel(dst, val), + Relaxed => intrinsics::atomic_or_relaxed(dst, val), + _ => intrinsics::atomic_or(dst, val) + }) +} + + +#[inline] +pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { + let dst = cast::transmute(dst); + let val = cast::transmute(val); + + cast::transmute(match order { + Acquire => intrinsics::atomic_xor_acq(dst, val), + Release => intrinsics::atomic_xor_rel(dst, val), + AcqRel => intrinsics::atomic_xor_acqrel(dst, val), + Relaxed => intrinsics::atomic_xor_relaxed(dst, val), + _ => intrinsics::atomic_xor(dst, val) + }) +} + + +/** + * An atomic fence. + * + * A fence 'A' which has `Release` ordering semantics, synchronizes with a + * fence 'B' with (at least) `Acquire` semantics, if and only if there exists + * atomic operations X and Y, both operating on some atomic object 'M' such + * that A is sequenced before X, Y is synchronized before B and Y observes + * the change to M. This provides a happens-before dependence between A and B. + * + * Atomic operations with `Release` or `Acquire` semantics can also synchronize + * with a fence. + * + * A fence which has `SeqCst` ordering, in addition to having both `Acquire` and + * `Release` semantics, participates in the global program order of the other + * `SeqCst` operations and/or fences. 
+ * + * Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. + */ +#[inline] +pub fn fence(order: Ordering) { + unsafe { + match order { + Acquire => intrinsics::atomic_fence_acq(), + Release => intrinsics::atomic_fence_rel(), + // An AcqRel fence must carry *both* acquire and release semantics; + // lowering it to a release-only fence loses the acquire half. + AcqRel => intrinsics::atomic_fence_acqrel(), + _ => intrinsics::atomic_fence(), + } + } +} + +#[cfg(test)] +mod test { + use option::*; + use super::*; + + #[test] + fn flag() { + let mut flg = AtomicFlag::new(); + assert!(!flg.test_and_set(SeqCst)); + assert!(flg.test_and_set(SeqCst)); + + flg.clear(SeqCst); + assert!(!flg.test_and_set(SeqCst)); + } + + #[test] + fn option_empty() { + let mut option: AtomicOption<()> = AtomicOption::empty(); + assert!(option.is_empty(SeqCst)); + } + + #[test] + fn option_swap() { + let mut p = AtomicOption::new(~1); + let a = ~2; + + let b = p.swap(a, SeqCst); + + assert_eq!(b, Some(~1)); + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn option_take() { + let mut p = AtomicOption::new(~1); + + assert_eq!(p.take(SeqCst), Some(~1)); + assert_eq!(p.take(SeqCst), None); + + let p2 = ~2; + p.swap(p2, SeqCst); + + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn option_fill() { + let mut p = AtomicOption::new(~1); + assert!(p.fill(~2, SeqCst).is_some()); // should fail; shouldn't leak! 
+ assert_eq!(p.take(SeqCst), Some(~1)); + + assert!(p.fill(~2, SeqCst).is_none()); // shouldn't fail + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn bool_and() { + let mut a = AtomicBool::new(true); + assert_eq!(a.fetch_and(false, SeqCst),true); + assert_eq!(a.load(SeqCst),false); + } + + static mut S_FLAG : AtomicFlag = INIT_ATOMIC_FLAG; + static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL; + static mut S_INT : AtomicInt = INIT_ATOMIC_INT; + static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT; + + #[test] + fn static_init() { + unsafe { + assert!(!S_FLAG.test_and_set(SeqCst)); + assert!(!S_BOOL.load(SeqCst)); + assert!(S_INT.load(SeqCst) == 0); + assert!(S_UINT.load(SeqCst) == 0); + } + } +} diff --git a/src/libstd/sync/deque.rs b/src/libstd/sync/deque.rs new file mode 100644 index 00000000000..4d0efcd6ee1 --- /dev/null +++ b/src/libstd/sync/deque.rs @@ -0,0 +1,661 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A (mostly) lock-free concurrent work-stealing deque +//! +//! This module contains an implementation of the Chase-Lev work stealing deque +//! described in "Dynamic Circular Work-Stealing Deque". The implementation is +//! heavily based on the pseudocode found in the paper. +//! +//! This implementation does not want to have the restriction of a garbage +//! collector for reclamation of buffers, and instead it uses a shared pool of +//! buffers. This shared pool is required for correctness in this +//! implementation. +//! +//! The only lock-synchronized portions of this deque are the buffer allocation +//! and deallocation portions. Otherwise all operations are lock-free. +//! +//! # Example +//! +//! 
 use std::sync::deque::BufferPool; +//! +//! let mut pool = BufferPool::new(); +//! let (mut worker, mut stealer) = pool.deque(); +//! +//! // Only the worker may push/pop +//! worker.push(1); +//! worker.pop(); +//! +//! // Stealers take data from the other end of the deque +//! worker.push(1); +//! stealer.steal(); +//! +//! // Stealers can be cloned to have many stealers stealing in parallel +//! worker.push(1); +//! let mut stealer2 = stealer.clone(); +//! stealer2.steal(); + +// NB: the "buffer pool" strategy is not done for speed, but rather for +// correctness. For more info, see the comment on `swap_buffer` + +// XXX: all atomic operations in this module use a SeqCst ordering. That is +// probably overkill + +use cast; +use clone::Clone; +use iter::{range, Iterator}; +use kinds::Send; +use libc; +use mem; +use ops::Drop; +use option::{Option, Some, None}; +use ptr; +use ptr::RawPtr; +use sync::arc::UnsafeArc; +use sync::atomics::{AtomicInt, AtomicPtr, SeqCst}; +use unstable::sync::Exclusive; +use vec::{OwnedVector, ImmutableVector}; + +// Once the queue is less than 1/K full, then it will be downsized. Note that +// the deque requires that this number be less than 2. +static K: int = 4; + +// Minimum number of bits that a buffer size should be. No buffer will resize to +// under this value, and all deques will initially contain a buffer of this +// size. +// +// The size in question is 1 << MIN_BITS +static MIN_BITS: int = 7; + +struct Deque { + bottom: AtomicInt, + top: AtomicInt, + array: AtomicPtr>, + pool: BufferPool, +} + +/// Worker half of the work-stealing deque. This worker has exclusive access to +/// one side of the deque, and uses `push` and `pop` methods to manipulate it. +/// +/// There may only be one worker per deque. +pub struct Worker { + priv deque: UnsafeArc>, +} + +/// The stealing half of the work-stealing deque. 
Stealers have access to the +/// opposite end of the deque from the worker, and they only have access to the +/// `steal` method. +pub struct Stealer { + priv deque: UnsafeArc>, +} + +/// When stealing some data, this is an enumeration of the possible outcomes. +#[deriving(Eq)] +pub enum Stolen { + /// The deque was empty at the time of stealing + Empty, + /// The stealer lost the race for stealing data, and a retry may return more + /// data. + Abort, + /// The stealer has successfully stolen some data. + Data(T), +} + +/// The allocation pool for buffers used by work-stealing deques. Right now this +/// structure is used for reclamation of memory after it is no longer in use by +/// deques. +/// +/// This data structure is protected by a mutex, but it is rarely used. Deques +/// will only use this structure when allocating a new buffer or deallocating a +/// previous one. +pub struct BufferPool { + priv pool: Exclusive<~[~Buffer]>, +} + +/// An internal buffer used by the chase-lev deque. This structure is actually +/// implemented as a circular buffer, and is used as the intermediate storage of +/// the data in the deque. +/// +/// This type is implemented with *T instead of ~[T] for two reasons: +/// +/// 1. There is nothing safe about using this buffer. This easily allows the +/// same value to be read twice in to rust, and there is nothing to +/// prevent this. The usage by the deque must ensure that one of the +/// values is forgotten. Furthermore, we only ever want to manually run +/// destructors for values in this buffer (on drop) because the bounds +/// are defined by the deque it's owned by. +/// +/// 2. We can certainly avoid bounds checks using *T instead of ~[T], although +/// LLVM is probably pretty good at doing this already. +struct Buffer { + storage: *T, + log_size: int, +} + +impl BufferPool { + /// Allocates a new buffer pool which in turn can be used to allocate new + /// deques. 
+ pub fn new() -> BufferPool { + BufferPool { pool: Exclusive::new(~[]) } + } + + /// Allocates a new work-stealing deque which will send/receiving memory to + /// and from this buffer pool. + pub fn deque(&mut self) -> (Worker, Stealer) { + let (a, b) = UnsafeArc::new2(Deque::new(self.clone())); + (Worker { deque: a }, Stealer { deque: b }) + } + + fn alloc(&mut self, bits: int) -> ~Buffer { + unsafe { + self.pool.with(|pool| { + match pool.iter().position(|x| x.size() >= (1 << bits)) { + Some(i) => pool.remove(i), + None => ~Buffer::new(bits) + } + }) + } + } + + fn free(&mut self, buf: ~Buffer) { + unsafe { + let mut buf = Some(buf); + self.pool.with(|pool| { + let buf = buf.take_unwrap(); + match pool.iter().position(|v| v.size() > buf.size()) { + Some(i) => pool.insert(i, buf), + None => pool.push(buf), + } + }) + } + } +} + +impl Clone for BufferPool { + fn clone(&self) -> BufferPool { BufferPool { pool: self.pool.clone() } } +} + +impl Worker { + /// Pushes data onto the front of this work queue. + pub fn push(&mut self, t: T) { + unsafe { (*self.deque.get()).push(t) } + } + /// Pops data off the front of the work queue, returning `None` on an empty + /// queue. + pub fn pop(&mut self) -> Option { + unsafe { (*self.deque.get()).pop() } + } + + /// Gets access to the buffer pool that this worker is attached to. This can + /// be used to create more deques which share the same buffer pool as this + /// deque. + pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool { + unsafe { &mut (*self.deque.get()).pool } + } +} + +impl Stealer { + /// Steals work off the end of the queue (opposite of the worker's end) + pub fn steal(&mut self) -> Stolen { + unsafe { (*self.deque.get()).steal() } + } + + /// Gets access to the buffer pool that this stealer is attached to. This + /// can be used to create more deques which share the same buffer pool as + /// this deque. 
+ pub fn pool<'a>(&'a mut self) -> &'a mut BufferPool { + unsafe { &mut (*self.deque.get()).pool } + } +} + +impl Clone for Stealer { + fn clone(&self) -> Stealer { Stealer { deque: self.deque.clone() } } +} + +// Almost all of this code can be found directly in the paper so I'm not +// personally going to heavily comment what's going on here. + +impl Deque { + fn new(mut pool: BufferPool) -> Deque { + let buf = pool.alloc(MIN_BITS); + Deque { + bottom: AtomicInt::new(0), + top: AtomicInt::new(0), + array: AtomicPtr::new(unsafe { cast::transmute(buf) }), + pool: pool, + } + } + + unsafe fn push(&mut self, data: T) { + let mut b = self.bottom.load(SeqCst); + let t = self.top.load(SeqCst); + let mut a = self.array.load(SeqCst); + let size = b - t; + if size >= (*a).size() - 1 { + // You won't find this code in the chase-lev deque paper. This is + // alluded to in a small footnote, however. We always free a buffer + // when growing in order to prevent leaks. + a = self.swap_buffer(b, a, (*a).resize(b, t, 1)); + b = self.bottom.load(SeqCst); + } + (*a).put(b, data); + self.bottom.store(b + 1, SeqCst); + } + + unsafe fn pop(&mut self) -> Option { + let b = self.bottom.load(SeqCst); + let a = self.array.load(SeqCst); + let b = b - 1; + self.bottom.store(b, SeqCst); + let t = self.top.load(SeqCst); + let size = b - t; + if size < 0 { + self.bottom.store(t, SeqCst); + return None; + } + let data = (*a).get(b); + if size > 0 { + self.maybe_shrink(b, t); + return Some(data); + } + if self.top.compare_and_swap(t, t + 1, SeqCst) == t { + self.bottom.store(t + 1, SeqCst); + return Some(data); + } else { + self.bottom.store(t + 1, SeqCst); + cast::forget(data); // someone else stole this value + return None; + } + } + + unsafe fn steal(&mut self) -> Stolen { + let t = self.top.load(SeqCst); + let old = self.array.load(SeqCst); + let b = self.bottom.load(SeqCst); + let a = self.array.load(SeqCst); + let size = b - t; + if size <= 0 { return Empty } + if size % (*a).size() == 0 { 
+ if a == old && t == self.top.load(SeqCst) { + return Empty + } + return Abort + } + let data = (*a).get(t); + if self.top.compare_and_swap(t, t + 1, SeqCst) == t { + Data(data) + } else { + cast::forget(data); // someone else stole this value + Abort + } + } + + unsafe fn maybe_shrink(&mut self, b: int, t: int) { + let a = self.array.load(SeqCst); + if b - t < (*a).size() / K && b - t > (1 << MIN_BITS) { + self.swap_buffer(b, a, (*a).resize(b, t, -1)); + } + } + + // Helper routine not mentioned in the paper which is used in growing and + // shrinking buffers to swap in a new buffer into place. As a bit of a + // recap, the whole point that we need a buffer pool rather than just + // calling malloc/free directly is that stealers can continue using buffers + // after this method has called 'free' on it. The continued usage is simply + // a read followed by a forget, but we must make sure that the memory can + // continue to be read after we flag this buffer for reclamation. + unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer, + buf: Buffer) -> *mut Buffer { + let newbuf: *mut Buffer = cast::transmute(~buf); + self.array.store(newbuf, SeqCst); + let ss = (*newbuf).size(); + self.bottom.store(b + ss, SeqCst); + let t = self.top.load(SeqCst); + if self.top.compare_and_swap(t, t + ss, SeqCst) != t { + self.bottom.store(b, SeqCst); + } + self.pool.free(cast::transmute(old)); + return newbuf; + } +} + + +#[unsafe_destructor] +impl Drop for Deque { + fn drop(&mut self) { + let t = self.top.load(SeqCst); + let b = self.bottom.load(SeqCst); + let a = self.array.load(SeqCst); + // Free whatever is leftover in the dequeue, and then move the buffer + // back into the pool. 
+ for i in range(t, b) { + let _: T = unsafe { (*a).get(i) }; + } + self.pool.free(unsafe { cast::transmute(a) }); + } +} + +impl Buffer { + unsafe fn new(log_size: int) -> Buffer { + let size = (1 << log_size) * mem::size_of::(); + let buffer = libc::malloc(size as libc::size_t); + assert!(!buffer.is_null()); + Buffer { + storage: buffer as *T, + log_size: log_size, + } + } + + fn size(&self) -> int { 1 << self.log_size } + + // Apparently LLVM cannot optimize (foo % (1 << bar)) into this implicitly + fn mask(&self) -> int { (1 << self.log_size) - 1 } + + // This does not protect against loading duplicate values of the same cell, + // nor does this clear out the contents contained within. Hence, this is a + // very unsafe method which the caller needs to treat specially in case a + // race is lost. + unsafe fn get(&self, i: int) -> T { + ptr::read_ptr(self.storage.offset(i & self.mask())) + } + + // Unsafe because this unsafely overwrites possibly uninitialized or + // initialized data. + unsafe fn put(&mut self, i: int, t: T) { + let ptr = self.storage.offset(i & self.mask()); + ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1); + cast::forget(t); + } + + // Again, unsafe because this has incredibly dubious ownership violations. + // It is assumed that this buffer is immediately dropped. + unsafe fn resize(&self, b: int, t: int, delta: int) -> Buffer { + let mut buf = Buffer::new(self.log_size + delta); + for i in range(t, b) { + buf.put(i, self.get(i)); + } + return buf; + } +} + +#[unsafe_destructor] +impl Drop for Buffer { + fn drop(&mut self) { + // It is assumed that all buffers are empty on drop. 
+ unsafe { libc::free(self.storage as *libc::c_void) } + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + use super::{Data, BufferPool, Abort, Empty, Worker, Stealer}; + + use cast; + use rt::thread::Thread; + use rand; + use rand::Rng; + use sync::atomics::{AtomicBool, INIT_ATOMIC_BOOL, SeqCst, + AtomicUint, INIT_ATOMIC_UINT}; + use vec; + + #[test] + fn smoke() { + let mut pool = BufferPool::new(); + let (mut w, mut s) = pool.deque(); + assert_eq!(w.pop(), None); + assert_eq!(s.steal(), Empty); + w.push(1); + assert_eq!(w.pop(), Some(1)); + w.push(1); + assert_eq!(s.steal(), Data(1)); + w.push(1); + assert_eq!(s.clone().steal(), Data(1)); + } + + #[test] + fn stealpush() { + static AMT: int = 100000; + let mut pool = BufferPool::::new(); + let (mut w, s) = pool.deque(); + let t = do Thread::start { + let mut s = s; + let mut left = AMT; + while left > 0 { + match s.steal() { + Data(i) => { + assert_eq!(i, 1); + left -= 1; + } + Abort | Empty => {} + } + } + }; + + for _ in range(0, AMT) { + w.push(1); + } + + t.join(); + } + + #[test] + fn stealpush_large() { + static AMT: int = 100000; + let mut pool = BufferPool::<(int, int)>::new(); + let (mut w, s) = pool.deque(); + let t = do Thread::start { + let mut s = s; + let mut left = AMT; + while left > 0 { + match s.steal() { + Data((1, 10)) => { left -= 1; } + Data(..) => fail!(), + Abort | Empty => {} + } + } + }; + + for _ in range(0, AMT) { + w.push((1, 10)); + } + + t.join(); + } + + fn stampede(mut w: Worker<~int>, s: Stealer<~int>, + nthreads: int, amt: uint) { + for _ in range(0, amt) { + w.push(~20); + } + let mut remaining = AtomicUint::new(amt); + let unsafe_remaining: *mut AtomicUint = &mut remaining; + + let threads = range(0, nthreads).map(|_| { + let s = s.clone(); + do Thread::start { + unsafe { + let mut s = s; + while (*unsafe_remaining).load(SeqCst) > 0 { + match s.steal() { + Data(~20) => { + (*unsafe_remaining).fetch_sub(1, SeqCst); + } + Data(..) 
=> fail!(), + Abort | Empty => {} + } + } + } + } + }).to_owned_vec(); + + while remaining.load(SeqCst) > 0 { + match w.pop() { + Some(~20) => { remaining.fetch_sub(1, SeqCst); } + Some(..) => fail!(), + None => {} + } + } + + for thread in threads.move_iter() { + thread.join(); + } + } + + #[test] + fn run_stampede() { + let mut pool = BufferPool::<~int>::new(); + let (w, s) = pool.deque(); + stampede(w, s, 8, 10000); + } + + #[test] + fn many_stampede() { + static AMT: uint = 4; + let mut pool = BufferPool::<~int>::new(); + let threads = range(0, AMT).map(|_| { + let (w, s) = pool.deque(); + do Thread::start { + stampede(w, s, 4, 10000); + } + }).to_owned_vec(); + + for thread in threads.move_iter() { + thread.join(); + } + } + + #[test] + fn stress() { + static AMT: int = 100000; + static NTHREADS: int = 8; + static mut DONE: AtomicBool = INIT_ATOMIC_BOOL; + static mut HITS: AtomicUint = INIT_ATOMIC_UINT; + let mut pool = BufferPool::::new(); + let (mut w, s) = pool.deque(); + + let threads = range(0, NTHREADS).map(|_| { + let s = s.clone(); + do Thread::start { + unsafe { + let mut s = s; + loop { + match s.steal() { + Data(2) => { HITS.fetch_add(1, SeqCst); } + Data(..) => fail!(), + _ if DONE.load(SeqCst) => break, + _ => {} + } + } + } + } + }).to_owned_vec(); + + let mut rng = rand::task_rng(); + let mut expected = 0; + while expected < AMT { + if rng.gen_range(0, 3) == 2 { + match w.pop() { + None => {} + Some(2) => unsafe { HITS.fetch_add(1, SeqCst); }, + Some(_) => fail!(), + } + } else { + expected += 1; + w.push(2); + } + } + + unsafe { + while HITS.load(SeqCst) < AMT as uint { + match w.pop() { + None => {} + Some(2) => { HITS.fetch_add(1, SeqCst); }, + Some(_) => fail!(), + } + } + DONE.store(true, SeqCst); + } + + for thread in threads.move_iter() { + thread.join(); + } + + assert_eq!(unsafe { HITS.load(SeqCst) }, expected as uint); + } + + #[test] + #[ignore(cfg(windows))] // apparently windows scheduling is weird? 
+ fn no_starvation() { + static AMT: int = 10000; + static NTHREADS: int = 4; + static mut DONE: AtomicBool = INIT_ATOMIC_BOOL; + let mut pool = BufferPool::<(int, uint)>::new(); + let (mut w, s) = pool.deque(); + + let (threads, hits) = vec::unzip(range(0, NTHREADS).map(|_| { + let s = s.clone(); + let unique_box = ~AtomicUint::new(0); + let thread_box = unsafe { + *cast::transmute::<&~AtomicUint,**mut AtomicUint>(&unique_box) + }; + (do Thread::start { + unsafe { + let mut s = s; + loop { + match s.steal() { + Data((1, 2)) => { + (*thread_box).fetch_add(1, SeqCst); + } + Data(..) => fail!(), + _ if DONE.load(SeqCst) => break, + _ => {} + } + } + } + }, unique_box) + })); + + let mut rng = rand::task_rng(); + let mut myhit = false; + let mut iter = 0; + 'outer: loop { + for _ in range(0, rng.gen_range(0, AMT)) { + if !myhit && rng.gen_range(0, 3) == 2 { + match w.pop() { + None => {} + Some((1, 2)) => myhit = true, + Some(_) => fail!(), + } + } else { + w.push((1, 2)); + } + } + iter += 1; + + debug!("loop iteration {}", iter); + for (i, slot) in hits.iter().enumerate() { + let amt = slot.load(SeqCst); + debug!("thread {}: {}", i, amt); + if amt == 0 { continue 'outer; } + } + if myhit { + break + } + } + + unsafe { DONE.store(true, SeqCst); } + + for thread in threads.move_iter() { + thread.join(); + } + } +} + diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs new file mode 100644 index 00000000000..3213c538152 --- /dev/null +++ b/src/libstd/sync/mod.rs @@ -0,0 +1,23 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Useful synchronization primitives +//! +//! This modules contains useful safe and unsafe synchronization primitives. +//! 
Most of the primitives in this module do not provide any sort of locking +//! and/or blocking at all, but rather provide the necessary tools to build +//! other types of concurrent primitives. + +pub mod arc; +pub mod atomics; +pub mod deque; +pub mod mpmc_bounded_queue; +pub mod mpsc_queue; +pub mod spsc_queue; diff --git a/src/libstd/sync/mpmc_bounded_queue.rs b/src/libstd/sync/mpmc_bounded_queue.rs new file mode 100644 index 00000000000..b623976306d --- /dev/null +++ b/src/libstd/sync/mpmc_bounded_queue.rs @@ -0,0 +1,211 @@ +/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * The views and conclusions contained in the software and documentation are + * those of the authors and should not be interpreted as representing official + * policies, either expressed or implied, of Dmitry Vyukov. + */ + +#[allow(missing_doc, dead_code)]; + +// http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue + +use clone::Clone; +use kinds::Send; +use num::{Exponential,Algebraic,Round}; +use option::{Option, Some, None}; +use sync::arc::UnsafeArc; +use sync::atomics::{AtomicUint,Relaxed,Release,Acquire}; +use vec; + +struct Node { + sequence: AtomicUint, + value: Option, +} + +struct State { + pad0: [u8, ..64], + buffer: ~[Node], + mask: uint, + pad1: [u8, ..64], + enqueue_pos: AtomicUint, + pad2: [u8, ..64], + dequeue_pos: AtomicUint, + pad3: [u8, ..64], +} + +pub struct Queue { + priv state: UnsafeArc>, +} + +impl State { + fn with_capacity(capacity: uint) -> State { + let capacity = if capacity < 2 || (capacity & (capacity - 1)) != 0 { + if capacity < 2 { + 2u + } else { + // use next power of 2 as capacity + 2f64.pow(&((capacity as f64).log2().ceil())) as uint + } + } else { + capacity + }; + let buffer = vec::from_fn(capacity, |i:uint| { + Node{sequence:AtomicUint::new(i),value:None} + }); + State{ + pad0: [0, ..64], + buffer: buffer, + mask: capacity-1, + pad1: [0, ..64], + enqueue_pos: AtomicUint::new(0), + pad2: [0, ..64], + dequeue_pos: AtomicUint::new(0), + pad3: [0, ..64], + } + } + + fn push(&mut self, value: T) -> bool { + let mask = self.mask; + let mut pos = self.enqueue_pos.load(Relaxed); + loop { + let node = &mut self.buffer[pos & mask]; + let seq = node.sequence.load(Acquire); + let diff: int = seq as int - pos as int; + + if diff == 0 { + let enqueue_pos = self.enqueue_pos.compare_and_swap(pos, pos+1, Relaxed); + if enqueue_pos == pos { + node.value = Some(value); + node.sequence.store(pos+1, Release); + break + } else { + pos = enqueue_pos; + } + } else if (diff < 0) { + return false + } else { + pos = 
self.enqueue_pos.load(Relaxed); + } + } + true + } + + fn pop(&mut self) -> Option { + let mask = self.mask; + let mut pos = self.dequeue_pos.load(Relaxed); + loop { + let node = &mut self.buffer[pos & mask]; + let seq = node.sequence.load(Acquire); + let diff: int = seq as int - (pos + 1) as int; + if diff == 0 { + let dequeue_pos = self.dequeue_pos.compare_and_swap(pos, pos+1, Relaxed); + if dequeue_pos == pos { + let value = node.value.take(); + node.sequence.store(pos + mask + 1, Release); + return value + } else { + pos = dequeue_pos; + } + } else if diff < 0 { + return None + } else { + pos = self.dequeue_pos.load(Relaxed); + } + } + } +} + +impl Queue { + pub fn with_capacity(capacity: uint) -> Queue { + Queue{ + state: UnsafeArc::new(State::with_capacity(capacity)) + } + } + + pub fn push(&mut self, value: T) -> bool { + unsafe { (*self.state.get()).push(value) } + } + + pub fn pop(&mut self) -> Option { + unsafe { (*self.state.get()).pop() } + } +} + +impl Clone for Queue { + fn clone(&self) -> Queue { + Queue { + state: self.state.clone() + } + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + use option::*; + use task; + use super::Queue; + + #[test] + fn test() { + let nthreads = 8u; + let nmsgs = 1000u; + let mut q = Queue::with_capacity(nthreads*nmsgs); + assert_eq!(None, q.pop()); + + for _ in range(0, nthreads) { + let q = q.clone(); + do task::spawn_sched(task::SingleThreaded) { + let mut q = q; + for i in range(0, nmsgs) { + assert!(q.push(i)); + } + } + } + + let mut completion_ports = ~[]; + for _ in range(0, nthreads) { + let (completion_port, completion_chan) = Chan::new(); + completion_ports.push(completion_port); + let q = q.clone(); + do task::spawn_sched(task::SingleThreaded) { + let mut q = q; + let mut i = 0u; + loop { + match q.pop() { + None => {}, + Some(_) => { + i += 1; + if i == nmsgs { break } + } + } + } + completion_chan.send(i); + } + } + + for completion_port in completion_ports.mut_iter() { + assert_eq!(nmsgs, 
completion_port.recv()); + } + } +} diff --git a/src/libstd/sync/mpsc_queue.rs b/src/libstd/sync/mpsc_queue.rs new file mode 100644 index 00000000000..89e56e3fa67 --- /dev/null +++ b/src/libstd/sync/mpsc_queue.rs @@ -0,0 +1,245 @@ +/* Copyright (c) 2010-2011 Dmitry Vyukov. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are + * those of the authors and should not be interpreted as representing official + * policies, either expressed or implied, of Dmitry Vyukov. + */ + +//! A mostly lock-free multi-producer, single consumer queue. +//! +//! This module contains an implementation of a concurrent MPSC queue. This +//! 
queue can be used to share data between tasks, and is also used as the +//! building block of channels in rust. +//! +//! Note that the current implementation of this queue has a caveat of the `pop` +//! method, and see the method for more information about it. Due to this +//! caveat, this queue may not be appropriate for all use-cases. + +// http://www.1024cores.net/home/lock-free-algorithms +// /queues/non-intrusive-mpsc-node-based-queue + +use cast; +use clone::Clone; +use kinds::Send; +use ops::Drop; +use option::{Option, None, Some}; +use ptr::RawPtr; +use sync::arc::UnsafeArc; +use sync::atomics::{AtomicPtr, Release, Acquire, AcqRel, Relaxed}; + +/// A result of the `pop` function. +pub enum PopResult { + /// Some data has been popped + Data(T), + /// The queue is empty + Empty, + /// The queue is in an inconsistent state. Popping data should succeed, but + /// some pushers have yet to make enough progress in order allow a pop to + /// succeed. It is recommended that a pop() occur "in the near future" in + /// order to see if the sender has made progress or not + Inconsistent, +} + +struct Node { + next: AtomicPtr>, + value: Option, +} + +struct State { + head: AtomicPtr>, + tail: *mut Node, + packet: P, +} + +/// The consumer half of this concurrent queue. This half is used to receive +/// data from the producers. +pub struct Consumer { + priv state: UnsafeArc>, +} + +/// The production half of the concurrent queue. This handle may be cloned in +/// order to make handles for new producers. +pub struct Producer { + priv state: UnsafeArc>, +} + +impl Clone for Producer { + fn clone(&self) -> Producer { + Producer { state: self.state.clone() } + } +} + +/// Creates a new MPSC queue. The given argument `p` is a user-defined "packet" +/// of information which will be shared by the consumer and the producer which +/// can be re-acquired via the `packet` function. 
This is helpful when extra +/// state is shared between the producer and consumer, but note that there is no +/// synchronization performed of this data. +pub fn queue(p: P) -> (Consumer, Producer) { + unsafe { + let (a, b) = UnsafeArc::new2(State::new(p)); + (Consumer { state: a }, Producer { state: b }) + } +} + +impl Node { + unsafe fn new(v: Option) -> *mut Node { + cast::transmute(~Node { + next: AtomicPtr::new(0 as *mut Node), + value: v, + }) + } +} + +impl State { + unsafe fn new(p: P) -> State { + let stub = Node::new(None); + State { + head: AtomicPtr::new(stub), + tail: stub, + packet: p, + } + } + + unsafe fn push(&mut self, t: T) { + let n = Node::new(Some(t)); + let prev = self.head.swap(n, AcqRel); + (*prev).next.store(n, Release); + } + + unsafe fn pop(&mut self) -> PopResult { + let tail = self.tail; + let next = (*tail).next.load(Acquire); + + if !next.is_null() { + self.tail = next; + assert!((*tail).value.is_none()); + assert!((*next).value.is_some()); + let ret = (*next).value.take_unwrap(); + let _: ~Node = cast::transmute(tail); + return Data(ret); + } + + if self.head.load(Acquire) == tail {Empty} else {Inconsistent} + } +} + +#[unsafe_destructor] +impl Drop for State { + fn drop(&mut self) { + unsafe { + let mut cur = self.tail; + while !cur.is_null() { + let next = (*cur).next.load(Relaxed); + let _: ~Node = cast::transmute(cur); + cur = next; + } + } + } +} + +impl Producer { + /// Pushes a new value onto this queue. + pub fn push(&mut self, value: T) { + unsafe { (*self.state.get()).push(value) } + } + /// Gets an unsafe pointer to the user-defined packet shared by the + /// producers and the consumer. Note that care must be taken to ensure that + /// the lifetime of the queue outlives the usage of the returned pointer. + pub unsafe fn packet(&self) -> *mut P { + &mut (*self.state.get()).packet as *mut P + } +} + +impl Consumer { + /// Pops some data from this queue. 
+ /// + /// Note that the current implementation means that this function cannot + /// return `Option`. It is possible for this queue to be in an + /// inconsistent state where many pushes have suceeded and completely + /// finished, but pops cannot return `Some(t)`. This inconsistent state + /// happens when a pusher is pre-empted at an inopportune moment. + /// + /// This inconsistent state means that this queue does indeed have data, but + /// it does not currently have access to it at this time. + pub fn pop(&mut self) -> PopResult { + unsafe { (*self.state.get()).pop() } + } + /// Attempts to pop data from this queue, but doesn't attempt too hard. This + /// will canonicalize inconsistent states to a `None` value. + pub fn casual_pop(&mut self) -> Option { + match self.pop() { + Data(t) => Some(t), + Empty | Inconsistent => None, + } + } + /// Gets an unsafe pointer to the underlying user-defined packet. See + /// `Producer.packet` for more information. + pub unsafe fn packet(&self) -> *mut P { + &mut (*self.state.get()).packet as *mut P + } +} + +#[cfg(test)] +mod tests { + use prelude::*; + + use task; + use super::{queue, Data, Empty, Inconsistent}; + + #[test] + fn test_full() { + let (_, mut p) = queue(()); + p.push(~1); + p.push(~2); + } + + #[test] + fn test() { + let nthreads = 8u; + let nmsgs = 1000u; + let (mut c, p) = queue(()); + match c.pop() { + Empty => {} + Inconsistent | Data(..) => fail!() + } + + for _ in range(0, nthreads) { + let q = p.clone(); + do task::spawn_sched(task::SingleThreaded) { + let mut q = q; + for i in range(0, nmsgs) { + q.push(i); + } + } + } + + let mut i = 0u; + while i < nthreads * nmsgs { + match c.pop() { + Empty | Inconsistent => {}, + Data(_) => { i += 1 } + } + } + } +} + diff --git a/src/libstd/sync/spsc_queue.rs b/src/libstd/sync/spsc_queue.rs new file mode 100644 index 00000000000..c4abba04659 --- /dev/null +++ b/src/libstd/sync/spsc_queue.rs @@ -0,0 +1,334 @@ +/* Copyright (c) 2010-2011 Dmitry Vyukov. 
All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY DMITRY VYUKOV "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT + * SHALL DMITRY VYUKOV OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF + * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are + * those of the authors and should not be interpreted as representing official + * policies, either expressed or implied, of Dmitry Vyukov. + */ + +// http://www.1024cores.net/home/lock-free-algorithms/queues/unbounded-spsc-queue + +//! A single-producer single-consumer concurrent queue +//! +//! This module contains the implementation of an SPSC queue which can be used +//! concurrently between two tasks. This data structure is safe to use and +//! enforces the semantics that there is one pusher and one popper. 
+ +use cast; +use kinds::Send; +use ops::Drop; +use option::{Some, None, Option}; +use ptr::RawPtr; +use sync::arc::UnsafeArc; +use sync::atomics::{AtomicPtr, Relaxed, AtomicUint, Acquire, Release}; + +// Node within the linked list queue of messages to send +struct Node { + // XXX: this could be an uninitialized T if we're careful enough, and + // that would reduce memory usage (and be a bit faster). + // is it worth it? + value: Option, // nullable for re-use of nodes + next: AtomicPtr>, // next node in the queue +} + +// The producer/consumer halves both need access to the `tail` field, and if +// they both have access to that we may as well just give them both access +// to this whole structure. +struct State { + // consumer fields + tail: *mut Node, // where to pop from + tail_prev: AtomicPtr>, // where to pop from + + // producer fields + head: *mut Node, // where to push to + first: *mut Node, // where to get new nodes from + tail_copy: *mut Node, // between first/tail + + // Cache maintenance fields. Additions and subtractions are stored + // separately in order to allow them to use nonatomic addition/subtraction. + cache_bound: uint, + cache_additions: AtomicUint, + cache_subtractions: AtomicUint, + + packet: P, +} + +/// Producer half of this queue. This handle is used to push data to the +/// consumer. +pub struct Producer { + priv state: UnsafeArc>, +} + +/// Consumer half of this queue. This handle is used to receive data from the +/// producer. +pub struct Consumer { + priv state: UnsafeArc>, +} + +/// Creates a new queue. The producer returned is connected to the consumer to +/// push all data to the consumer. +/// +/// # Arguments +/// +/// * `bound` - This queue implementation is implemented with a linked list, +/// and this means that a push is always a malloc. In order to +/// amortize this cost, an internal cache of nodes is maintained +/// to prevent a malloc from always being necessary. 
This bound is +/// the limit on the size of the cache (if desired). If the value +/// is 0, then the cache has no bound. Otherwise, the cache will +/// never grow larger than `bound` (although the queue itself +/// could be much larger. +/// +/// * `p` - This is the user-defined packet of data which will also be shared +/// between the producer and consumer. +pub fn queue(bound: uint, + p: P) -> (Consumer, Producer) +{ + let n1 = Node::new(); + let n2 = Node::new(); + unsafe { (*n1).next.store(n2, Relaxed) } + let state = State { + tail: n2, + tail_prev: AtomicPtr::new(n1), + head: n2, + first: n1, + tail_copy: n1, + cache_bound: bound, + cache_additions: AtomicUint::new(0), + cache_subtractions: AtomicUint::new(0), + packet: p, + }; + let (arc1, arc2) = UnsafeArc::new2(state); + (Consumer { state: arc1 }, Producer { state: arc2 }) +} + +impl Node { + fn new() -> *mut Node { + unsafe { + cast::transmute(~Node { + value: None, + next: AtomicPtr::new(0 as *mut Node), + }) + } + } +} + +impl Producer { + /// Pushes data onto the queue + pub fn push(&mut self, t: T) { + unsafe { (*self.state.get()).push(t) } + } + /// Tests whether the queue is empty. Note that if this function returns + /// `false`, the return value is significant, but if the return value is + /// `true` then almost no meaning can be attached to the return value. + pub fn is_empty(&self) -> bool { + unsafe { (*self.state.get()).is_empty() } + } + /// Acquires an unsafe pointer to the underlying user-defined packet. Note + /// that care must be taken to ensure that the queue outlives the usage of + /// the packet (because it is an unsafe pointer). + pub unsafe fn packet(&self) -> *mut P { + &mut (*self.state.get()).packet as *mut P + } +} + +impl Consumer { + /// Pops some data from this queue, returning `None` when the queue is + /// empty. + pub fn pop(&mut self) -> Option { + unsafe { (*self.state.get()).pop() } + } + /// Same function as the producer's `packet` method. 
+ pub unsafe fn packet(&self) -> *mut P { + &mut (*self.state.get()).packet as *mut P + } +} + +impl State { + // remember that there is only one thread executing `push` (and only one + // thread executing `pop`) + unsafe fn push(&mut self, t: T) { + // Acquire a node (which either uses a cached one or allocates a new + // one), and then append this to the 'head' node. + let n = self.alloc(); + assert!((*n).value.is_none()); + (*n).value = Some(t); + (*n).next.store(0 as *mut Node, Relaxed); + (*self.head).next.store(n, Release); + self.head = n; + } + + unsafe fn alloc(&mut self) -> *mut Node { + // First try to see if we can consume the 'first' node for our uses. + // We try to avoid as many atomic instructions as possible here, so + // the addition to cache_subtractions is not atomic (plus we're the + // only one subtracting from the cache). + if self.first != self.tail_copy { + if self.cache_bound > 0 { + let b = self.cache_subtractions.load(Relaxed); + self.cache_subtractions.store(b + 1, Relaxed); + } + let ret = self.first; + self.first = (*ret).next.load(Relaxed); + return ret; + } + // If the above fails, then update our copy of the tail and try + // again. + self.tail_copy = self.tail_prev.load(Acquire); + if self.first != self.tail_copy { + if self.cache_bound > 0 { + let b = self.cache_subtractions.load(Relaxed); + self.cache_subtractions.store(b + 1, Relaxed); + } + let ret = self.first; + self.first = (*ret).next.load(Relaxed); + return ret; + } + // If all of that fails, then we have to allocate a new node + // (there's nothing in the node cache). + Node::new() + } + + // remember that there is only one thread executing `pop` (and only one + // thread executing `push`) + unsafe fn pop(&mut self) -> Option { + // The `tail` node is not actually a used node, but rather a + // sentinel from where we should start popping from. Hence, look at + // tail's next field and see if we can use it. 
If we do a pop, then + // the current tail node is a candidate for going into the cache. + let tail = self.tail; + let next = (*tail).next.load(Acquire); + if next.is_null() { return None } + assert!((*next).value.is_some()); + let ret = (*next).value.take(); + + self.tail = next; + if self.cache_bound == 0 { + self.tail_prev.store(tail, Release); + } else { + // XXX: this is dubious with overflow. + let additions = self.cache_additions.load(Relaxed); + let subtractions = self.cache_subtractions.load(Relaxed); + let size = additions - subtractions; + + if size < self.cache_bound { + self.tail_prev.store(tail, Release); + self.cache_additions.store(additions + 1, Relaxed); + } else { + (*self.tail_prev.load(Relaxed)).next.store(next, Relaxed); + // We have successfully erased all references to 'tail', so + // now we can safely drop it. + let _: ~Node = cast::transmute(tail); + } + } + return ret; + } + + unsafe fn is_empty(&self) -> bool { + let tail = self.tail; + let next = (*tail).next.load(Acquire); + return next.is_null(); + } +} + +#[unsafe_destructor] +impl Drop for State { + fn drop(&mut self) { + unsafe { + let mut cur = self.first; + while !cur.is_null() { + let next = (*cur).next.load(Relaxed); + let _n: ~Node = cast::transmute(cur); + cur = next; + } + } + } +} + +#[cfg(test)] +mod test { + use prelude::*; + use super::queue; + use task; + + #[test] + fn smoke() { + let (mut c, mut p) = queue(0, ()); + p.push(1); + p.push(2); + assert_eq!(c.pop(), Some(1)); + assert_eq!(c.pop(), Some(2)); + assert_eq!(c.pop(), None); + p.push(3); + p.push(4); + assert_eq!(c.pop(), Some(3)); + assert_eq!(c.pop(), Some(4)); + assert_eq!(c.pop(), None); + } + + #[test] + fn drop_full() { + let (_, mut p) = queue(0, ()); + p.push(~1); + p.push(~2); + } + + #[test] + fn smoke_bound() { + let (mut c, mut p) = queue(1, ()); + p.push(1); + p.push(2); + assert_eq!(c.pop(), Some(1)); + assert_eq!(c.pop(), Some(2)); + assert_eq!(c.pop(), None); + p.push(3); + p.push(4); + 
assert_eq!(c.pop(), Some(3)); + assert_eq!(c.pop(), Some(4)); + assert_eq!(c.pop(), None); + } + + #[test] + fn stress() { + stress_bound(0); + stress_bound(1); + + fn stress_bound(bound: uint) { + let (c, mut p) = queue(bound, ()); + do task::spawn_sched(task::SingleThreaded) { + let mut c = c; + for _ in range(0, 100000) { + loop { + match c.pop() { + Some(1) => break, + Some(_) => fail!(), + None => {} + } + } + } + } + for _ in range(0, 100000) { + p.push(1); + } + } + } +} diff --git a/src/libstd/unstable/atomics.rs b/src/libstd/unstable/atomics.rs deleted file mode 100644 index 9aaccb3ebba..00000000000 --- a/src/libstd/unstable/atomics.rs +++ /dev/null @@ -1,600 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - * Atomic types - * - * Basic atomic types supporting atomic operations. Each method takes an `Ordering` which - * represents the strength of the memory barrier for that operation. These orderings are the same - * as C++11 atomic orderings [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync] - * - * All atomic types are a single word in size. - */ - -use unstable::intrinsics; -use cast; -use option::{Option,Some,None}; -use libc::c_void; -use ops::Drop; -use util::NonCopyable; - -/** - * A simple atomic flag, that can be set and cleared. The most basic atomic type. - */ -pub struct AtomicFlag { - priv v: int, - priv nocopy: NonCopyable -} - -/** - * An atomic boolean type. 
- */ -pub struct AtomicBool { - priv v: uint, - priv nocopy: NonCopyable -} - -/** - * A signed atomic integer type, supporting basic atomic arithmetic operations - */ -pub struct AtomicInt { - priv v: int, - priv nocopy: NonCopyable -} - -/** - * An unsigned atomic integer type, supporting basic atomic arithmetic operations - */ -pub struct AtomicUint { - priv v: uint, - priv nocopy: NonCopyable -} - -/** - * An unsafe atomic pointer. Only supports basic atomic operations - */ -pub struct AtomicPtr { - priv p: *mut T, - priv nocopy: NonCopyable -} - -/** - * An owned atomic pointer. Ensures that only a single reference to the data is held at any time. - */ -#[unsafe_no_drop_flag] -pub struct AtomicOption { - priv p: *mut c_void -} - -pub enum Ordering { - Relaxed, - Release, - Acquire, - AcqRel, - SeqCst -} - -pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nocopy: NonCopyable }; -pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nocopy: NonCopyable }; -pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nocopy: NonCopyable }; -pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nocopy: NonCopyable }; - -impl AtomicFlag { - - pub fn new() -> AtomicFlag { - AtomicFlag { v: 0, nocopy: NonCopyable } - } - - /** - * Clears the atomic flag - */ - #[inline] - pub fn clear(&mut self, order: Ordering) { - unsafe {atomic_store(&mut self.v, 0, order)} - } - - /** - * Sets the flag if it was previously unset, returns the previous value of the - * flag. 
- */ - #[inline] - pub fn test_and_set(&mut self, order: Ordering) -> bool { - unsafe { atomic_compare_and_swap(&mut self.v, 0, 1, order) > 0 } - } -} - -impl AtomicBool { - pub fn new(v: bool) -> AtomicBool { - AtomicBool { v: if v { 1 } else { 0 }, nocopy: NonCopyable } - } - - #[inline] - pub fn load(&self, order: Ordering) -> bool { - unsafe { atomic_load(&self.v, order) > 0 } - } - - #[inline] - pub fn store(&mut self, val: bool, order: Ordering) { - let val = if val { 1 } else { 0 }; - - unsafe { atomic_store(&mut self.v, val, order); } - } - - #[inline] - pub fn swap(&mut self, val: bool, order: Ordering) -> bool { - let val = if val { 1 } else { 0 }; - - unsafe { atomic_swap(&mut self.v, val, order) > 0 } - } - - #[inline] - pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { - let old = if old { 1 } else { 0 }; - let new = if new { 1 } else { 0 }; - - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) > 0 } - } - - /// Returns the old value - #[inline] - pub fn fetch_and(&mut self, val: bool, order: Ordering) -> bool { - let val = if val { 1 } else { 0 }; - - unsafe { atomic_and(&mut self.v, val, order) > 0 } - } - - /// Returns the old value - #[inline] - pub fn fetch_nand(&mut self, val: bool, order: Ordering) -> bool { - let val = if val { 1 } else { 0 }; - - unsafe { atomic_nand(&mut self.v, val, order) > 0 } - } - - /// Returns the old value - #[inline] - pub fn fetch_or(&mut self, val: bool, order: Ordering) -> bool { - let val = if val { 1 } else { 0 }; - - unsafe { atomic_or(&mut self.v, val, order) > 0 } - } - - /// Returns the old value - #[inline] - pub fn fetch_xor(&mut self, val: bool, order: Ordering) -> bool { - let val = if val { 1 } else { 0 }; - - unsafe { atomic_xor(&mut self.v, val, order) > 0 } - } -} - -impl AtomicInt { - pub fn new(v: int) -> AtomicInt { - AtomicInt { v:v, nocopy: NonCopyable } - } - - #[inline] - pub fn load(&self, order: Ordering) -> int { - unsafe { atomic_load(&self.v, 
order) } - } - - #[inline] - pub fn store(&mut self, val: int, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, order); } - } - - #[inline] - pub fn swap(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_swap(&mut self.v, val, order) } - } - - #[inline] - pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } - } - - /// Returns the old value (like __sync_fetch_and_add). - #[inline] - pub fn fetch_add(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_add(&mut self.v, val, order) } - } - - /// Returns the old value (like __sync_fetch_and_sub). - #[inline] - pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_sub(&mut self.v, val, order) } - } -} - -impl AtomicUint { - pub fn new(v: uint) -> AtomicUint { - AtomicUint { v:v, nocopy: NonCopyable } - } - - #[inline] - pub fn load(&self, order: Ordering) -> uint { - unsafe { atomic_load(&self.v, order) } - } - - #[inline] - pub fn store(&mut self, val: uint, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, order); } - } - - #[inline] - pub fn swap(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_swap(&mut self.v, val, order) } - } - - #[inline] - pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } - } - - /// Returns the old value (like __sync_fetch_and_add). - #[inline] - pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_add(&mut self.v, val, order) } - } - - /// Returns the old value (like __sync_fetch_and_sub).. 
- #[inline] - pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_sub(&mut self.v, val, order) } - } -} - -impl AtomicPtr { - pub fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p:p, nocopy: NonCopyable } - } - - #[inline] - pub fn load(&self, order: Ordering) -> *mut T { - unsafe { atomic_load(&self.p, order) } - } - - #[inline] - pub fn store(&mut self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(&mut self.p, ptr, order); } - } - - #[inline] - pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(&mut self.p, ptr, order) } - } - - #[inline] - pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_compare_and_swap(&mut self.p, old, new, order) } - } -} - -impl AtomicOption { - pub fn new(p: ~T) -> AtomicOption { - unsafe { - AtomicOption { - p: cast::transmute(p) - } - } - } - - pub fn empty() -> AtomicOption { - unsafe { - AtomicOption { - p: cast::transmute(0) - } - } - } - - #[inline] - pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { - unsafe { - let val = cast::transmute(val); - - let p = atomic_swap(&mut self.p, val, order); - let pv : &uint = cast::transmute(&p); - - if *pv == 0 { - None - } else { - Some(cast::transmute(p)) - } - } - } - - #[inline] - pub fn take(&mut self, order: Ordering) -> Option<~T> { - unsafe { - self.swap(cast::transmute(0), order) - } - } - - /// A compare-and-swap. Succeeds if the option is 'None' and returns 'None' - /// if so. If the option was already 'Some', returns 'Some' of the rejected - /// value. 
- #[inline] - pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> { - unsafe { - let val = cast::transmute(val); - let expected = cast::transmute(0); - let oldval = atomic_compare_and_swap(&mut self.p, expected, val, order); - if oldval == expected { - None - } else { - Some(cast::transmute(val)) - } - } - } - - /// Be careful: The caller must have some external method of ensuring the - /// result does not get invalidated by another task after this returns. - #[inline] - pub fn is_empty(&mut self, order: Ordering) -> bool { - unsafe { atomic_load(&self.p, order) == cast::transmute(0) } - } -} - -#[unsafe_destructor] -impl Drop for AtomicOption { - fn drop(&mut self) { - let _ = self.take(SeqCst); - } -} - -#[inline] -pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - match order { - Release => intrinsics::atomic_store_rel(dst, val), - Relaxed => intrinsics::atomic_store_relaxed(dst, val), - _ => intrinsics::atomic_store(dst, val) - } -} - -#[inline] -pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { - let dst = cast::transmute(dst); - - cast::transmute(match order { - Acquire => intrinsics::atomic_load_acq(dst), - Relaxed => intrinsics::atomic_load_relaxed(dst), - _ => intrinsics::atomic_load(dst) - }) -} - -#[inline] -pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - cast::transmute(match order { - Acquire => intrinsics::atomic_xchg_acq(dst, val), - Release => intrinsics::atomic_xchg_rel(dst, val), - AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), - Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), - _ => intrinsics::atomic_xchg(dst, val) - }) -} - -/// Returns the old value (like __sync_fetch_and_add). 
-#[inline] -pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - cast::transmute(match order { - Acquire => intrinsics::atomic_xadd_acq(dst, val), - Release => intrinsics::atomic_xadd_rel(dst, val), - AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), - Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), - _ => intrinsics::atomic_xadd(dst, val) - }) -} - -/// Returns the old value (like __sync_fetch_and_sub). -#[inline] -pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - cast::transmute(match order { - Acquire => intrinsics::atomic_xsub_acq(dst, val), - Release => intrinsics::atomic_xsub_rel(dst, val), - AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), - Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), - _ => intrinsics::atomic_xsub(dst, val) - }) -} - -#[inline] -pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { - let dst = cast::transmute(dst); - let old = cast::transmute(old); - let new = cast::transmute(new); - - cast::transmute(match order { - Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), - Release => intrinsics::atomic_cxchg_rel(dst, old, new), - AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), - Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), - _ => intrinsics::atomic_cxchg(dst, old, new), - }) -} - -#[inline] -pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - cast::transmute(match order { - Acquire => intrinsics::atomic_and_acq(dst, val), - Release => intrinsics::atomic_and_rel(dst, val), - AcqRel => intrinsics::atomic_and_acqrel(dst, val), - Relaxed => intrinsics::atomic_and_relaxed(dst, val), - _ => intrinsics::atomic_and(dst, val) - }) -} - - -#[inline] -pub unsafe fn atomic_nand(dst: &mut T, val: T, order: 
Ordering) -> T { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - cast::transmute(match order { - Acquire => intrinsics::atomic_nand_acq(dst, val), - Release => intrinsics::atomic_nand_rel(dst, val), - AcqRel => intrinsics::atomic_nand_acqrel(dst, val), - Relaxed => intrinsics::atomic_nand_relaxed(dst, val), - _ => intrinsics::atomic_nand(dst, val) - }) -} - - -#[inline] -pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - cast::transmute(match order { - Acquire => intrinsics::atomic_or_acq(dst, val), - Release => intrinsics::atomic_or_rel(dst, val), - AcqRel => intrinsics::atomic_or_acqrel(dst, val), - Relaxed => intrinsics::atomic_or_relaxed(dst, val), - _ => intrinsics::atomic_or(dst, val) - }) -} - - -#[inline] -pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { - let dst = cast::transmute(dst); - let val = cast::transmute(val); - - cast::transmute(match order { - Acquire => intrinsics::atomic_xor_acq(dst, val), - Release => intrinsics::atomic_xor_rel(dst, val), - AcqRel => intrinsics::atomic_xor_acqrel(dst, val), - Relaxed => intrinsics::atomic_xor_relaxed(dst, val), - _ => intrinsics::atomic_xor(dst, val) - }) -} - - -/** - * An atomic fence. - * - * A fence 'A' which has `Release` ordering semantics, synchronizes with a - * fence 'B' with (at least) `Acquire` semantics, if and only if there exists - * atomic operations X and Y, both operating on some atomic object 'M' such - * that A is sequenced before X, Y is synchronized before B and Y observers - * the change to M. This provides a happens-before dependence between A and B. - * - * Atomic operations with `Release` or `Acquire` semantics can also synchronize - * with a fence. - * - * A fence with has `SeqCst` ordering, in addition to having both `Acquire` and - * `Release` semantics, participates in the global program order of the other - * `SeqCst` operations and/or fences. 
- * - * Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. - */ -#[inline] -pub fn fence(order: Ordering) { - unsafe { - match order { - Acquire => intrinsics::atomic_fence_acq(), - Release => intrinsics::atomic_fence_rel(), - AcqRel => intrinsics::atomic_fence_rel(), - _ => intrinsics::atomic_fence(), - } - } -} - -#[cfg(test)] -mod test { - use option::*; - use super::*; - - #[test] - fn flag() { - let mut flg = AtomicFlag::new(); - assert!(!flg.test_and_set(SeqCst)); - assert!(flg.test_and_set(SeqCst)); - - flg.clear(SeqCst); - assert!(!flg.test_and_set(SeqCst)); - } - - #[test] - fn option_empty() { - let mut option: AtomicOption<()> = AtomicOption::empty(); - assert!(option.is_empty(SeqCst)); - } - - #[test] - fn option_swap() { - let mut p = AtomicOption::new(~1); - let a = ~2; - - let b = p.swap(a, SeqCst); - - assert_eq!(b, Some(~1)); - assert_eq!(p.take(SeqCst), Some(~2)); - } - - #[test] - fn option_take() { - let mut p = AtomicOption::new(~1); - - assert_eq!(p.take(SeqCst), Some(~1)); - assert_eq!(p.take(SeqCst), None); - - let p2 = ~2; - p.swap(p2, SeqCst); - - assert_eq!(p.take(SeqCst), Some(~2)); - } - - #[test] - fn option_fill() { - let mut p = AtomicOption::new(~1); - assert!(p.fill(~2, SeqCst).is_some()); // should fail; shouldn't leak! 
- assert_eq!(p.take(SeqCst), Some(~1)); - - assert!(p.fill(~2, SeqCst).is_none()); // shouldn't fail - assert_eq!(p.take(SeqCst), Some(~2)); - } - - #[test] - fn bool_and() { - let mut a = AtomicBool::new(true); - assert_eq!(a.fetch_and(false, SeqCst),true); - assert_eq!(a.load(SeqCst),false); - } - - static mut S_FLAG : AtomicFlag = INIT_ATOMIC_FLAG; - static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL; - static mut S_INT : AtomicInt = INIT_ATOMIC_INT; - static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT; - - #[test] - fn static_init() { - unsafe { - assert!(!S_FLAG.test_and_set(SeqCst)); - assert!(!S_BOOL.load(SeqCst)); - assert!(S_INT.load(SeqCst) == 0); - assert!(S_UINT.load(SeqCst) == 0); - } - } -} diff --git a/src/libstd/unstable/dynamic_lib.rs b/src/libstd/unstable/dynamic_lib.rs index 03b25fbd044..0569fe32c58 100644 --- a/src/libstd/unstable/dynamic_lib.rs +++ b/src/libstd/unstable/dynamic_lib.rs @@ -140,7 +140,6 @@ pub mod dl { use path; use ptr; use str; - use unstable::sync::atomic; use result::*; pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void { @@ -158,11 +157,7 @@ pub mod dl { static mut lock: Mutex = MUTEX_INIT; unsafe { // dlerror isn't thread safe, so we need to lock around this entire - // sequence. `atomic` asserts that we don't do anything that - // would cause this task to be descheduled, which could deadlock - // the scheduler if it happens while the lock is held. - // FIXME #9105 use a Rust mutex instead of C++ mutexes. 
- let _guard = atomic(); + // sequence lock.lock(); let _old_error = dlerror(); @@ -208,7 +203,6 @@ pub mod dl { use libc; use path; use ptr; - use unstable::sync::atomic; use result::*; pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void { @@ -225,7 +219,6 @@ pub mod dl { pub fn check_for_errors_in(f: || -> T) -> Result { unsafe { - let _guard = atomic(); SetLastError(0); let result = f(); diff --git a/src/libstd/unstable/mod.rs b/src/libstd/unstable/mod.rs index 043d99eb1b8..f70d0b5169f 100644 --- a/src/libstd/unstable/mod.rs +++ b/src/libstd/unstable/mod.rs @@ -22,7 +22,6 @@ pub mod simd; pub mod lang; pub mod sync; pub mod mutex; -pub mod atomics; pub mod raw; /** diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs index 3e7a861b385..eaf716f2726 100644 --- a/src/libstd/unstable/mutex.rs +++ b/src/libstd/unstable/mutex.rs @@ -48,7 +48,7 @@ #[allow(non_camel_case_types)]; use libc::c_void; -use unstable::atomics; +use sync::atomics; pub struct Mutex { // pointers for the lock/cond handles, atomically updated diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs index 50fae1e0239..ad36f71cdea 100644 --- a/src/libstd/unstable/sync.rs +++ b/src/libstd/unstable/sync.rs @@ -8,353 +8,12 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use cast; -use comm::{Chan, Port}; -use ptr; -use option::{Option,Some,None}; -use task; -use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,Relaxed,SeqCst}; -use unstable::mutex::Mutex; -use ops::Drop; use clone::Clone; use kinds::Send; -use vec; - -/// An atomically reference counted pointer. -/// -/// Enforces no shared-memory safety. 
-//#[unsafe_no_drop_flag] FIXME: #9758 -pub struct UnsafeArc { - data: *mut ArcData, -} - -pub enum UnsafeArcUnwrap { - UnsafeArcSelf(UnsafeArc), - UnsafeArcT(T) -} - -#[cfg(test)] -impl UnsafeArcUnwrap { - fn expect_t(self, msg: &'static str) -> T { - match self { - UnsafeArcSelf(_) => fail!(msg), - UnsafeArcT(t) => t - } - } - - fn is_self(&self) -> bool { - match *self { - UnsafeArcSelf(_) => true, - UnsafeArcT(_) => false - } - } -} - -struct ArcData { - count: AtomicUint, - // An unwrapper uses this protocol to communicate with the "other" task that - // drops the last refcount on an arc. Unfortunately this can't be a proper - // pipe protocol because the unwrapper has to access both stages at once. - // FIXME(#7544): Maybe use AtomicPtr instead (to avoid xchg in take() later)? - unwrapper: AtomicOption<(Chan<()>, Port)>, - // FIXME(#3224) should be able to make this non-option to save memory - data: Option, -} - -unsafe fn new_inner(data: T, refcount: uint) -> *mut ArcData { - let data = ~ArcData { count: AtomicUint::new(refcount), - unwrapper: AtomicOption::empty(), - data: Some(data) }; - cast::transmute(data) -} - -/// A helper object used by `UnsafeArc::unwrap`. -struct ChannelAndDataGuard { - channel: Option>, - data: Option<~ArcData>, -} - -#[unsafe_destructor] -impl Drop for ChannelAndDataGuard { - fn drop(&mut self) { - if task::failing() { - // Killed during wait. Because this might happen while - // someone else still holds a reference, we can't free - // the data now; the "other" last refcount will free it. 
- unsafe { - let channel = self.channel.take_unwrap(); - let data = self.data.take_unwrap(); - channel.send(false); - cast::forget(data); - } - } - } -} - -impl ChannelAndDataGuard { - fn unwrap(mut self) -> (Chan, ~ArcData) { - (self.channel.take_unwrap(), self.data.take_unwrap()) - } -} - -impl UnsafeArc { - pub fn new(data: T) -> UnsafeArc { - unsafe { UnsafeArc { data: new_inner(data, 1) } } - } - - /// As new(), but returns an extra pre-cloned handle. - pub fn new2(data: T) -> (UnsafeArc, UnsafeArc) { - unsafe { - let ptr = new_inner(data, 2); - (UnsafeArc { data: ptr }, UnsafeArc { data: ptr }) - } - } - - /// As new(), but returns a vector of as many pre-cloned handles as requested. - pub fn newN(data: T, num_handles: uint) -> ~[UnsafeArc] { - unsafe { - if num_handles == 0 { - ~[] // need to free data here - } else { - let ptr = new_inner(data, num_handles); - vec::from_fn(num_handles, |_| UnsafeArc { data: ptr }) - } - } - } - - /// As newN(), but from an already-existing handle. Uses one xadd. - pub fn cloneN(self, num_handles: uint) -> ~[UnsafeArc] { - if num_handles == 0 { - ~[] // The "num_handles - 1" trick (below) fails in the 0 case. - } else { - unsafe { - // Minus one because we are recycling the given handle's refcount. - let old_count = (*self.data).count.fetch_add(num_handles - 1, Acquire); - // let old_count = (*self.data).count.fetch_add(num_handles, Acquire); - assert!(old_count >= 1); - let ptr = self.data; - cast::forget(self); // Don't run the destructor on this handle. 
- vec::from_fn(num_handles, |_| UnsafeArc { data: ptr }) - } - } - } - - #[inline] - pub fn get(&self) -> *mut T { - unsafe { - assert!((*self.data).count.load(Relaxed) > 0); - let r: *mut T = (*self.data).data.get_mut_ref(); - return r; - } - } - - #[inline] - pub fn get_immut(&self) -> *T { - unsafe { - assert!((*self.data).count.load(Relaxed) > 0); - let r: *T = (*self.data).data.get_ref(); - return r; - } - } - - /// Wait until all other handles are dropped, then retrieve the enclosed - /// data. See extra::arc::Arc for specific semantics documentation. - /// If called when the task is already unkillable, unwrap will unkillably - /// block; otherwise, an unwrapping task can be killed by linked failure. - pub fn unwrap(self) -> T { - unsafe { - let mut this = self; - // The ~ dtor needs to run if this code succeeds. - let mut data: ~ArcData = cast::transmute(this.data); - // Set up the unwrap protocol. - let (p1,c1) = Chan::new(); // () - let (p2,c2) = Chan::new(); // bool - // Try to put our server end in the unwrapper slot. - // This needs no barrier -- it's protected by the release barrier on - // the xadd, and the acquire+release barrier in the destructor's xadd. - if data.unwrapper.fill(~(c1,p2), Relaxed).is_none() { - // Got in. Tell this handle's destructor not to run (we are now it). - this.data = ptr::mut_null(); - // Drop our own reference. - let old_count = data.count.fetch_sub(1, Release); - assert!(old_count >= 1); - if old_count == 1 { - // We were the last owner. Can unwrap immediately. - // AtomicOption's destructor will free the server endpoint. - // FIXME(#3224): it should be like this - // let ~ArcData { data: user_data, _ } = data; - // user_data - data.data.take_unwrap() - } else { - // The *next* person who sees the refcount hit 0 will wake us. - let c2_and_data = ChannelAndDataGuard { - channel: Some(c2), - data: Some(data), - }; - p1.recv(); - // Got here. Back in the 'unkillable' without getting killed. 
- let (c2, data) = c2_and_data.unwrap(); - c2.send(true); - // FIXME(#3224): it should be like this - // let ~ArcData { data: user_data, _ } = data; - // user_data - let mut data = data; - data.data.take_unwrap() - } - } else { - // If 'put' returns the server end back to us, we were rejected; - // someone else was trying to unwrap. Avoid guaranteed deadlock. - cast::forget(data); - fail!("Another task is already unwrapping this Arc!"); - } - } - } - - /// As unwrap above, but without blocking. Returns 'UnsafeArcSelf(self)' if this is - /// not the last reference; 'UnsafeArcT(unwrapped_data)' if so. - pub fn try_unwrap(mut self) -> UnsafeArcUnwrap { - unsafe { - // The ~ dtor needs to run if this code succeeds. - let mut data: ~ArcData = cast::transmute(self.data); - // This can of course race with anybody else who has a handle, but in - // such a case, the returned count will always be at least 2. If we - // see 1, no race was possible. All that matters is 1 or not-1. - let count = data.count.load(Acquire); - assert!(count >= 1); - // The more interesting race is one with an unwrapper. They may have - // already dropped their count -- but if so, the unwrapper pointer - // will have been set first, which the barriers ensure we will see. - // (Note: using is_empty(), not take(), to not free the unwrapper.) - if count == 1 && data.unwrapper.is_empty(Acquire) { - // Tell this handle's destructor not to run (we are now it). - self.data = ptr::mut_null(); - // FIXME(#3224) as above - UnsafeArcT(data.data.take_unwrap()) - } else { - cast::forget(data); - UnsafeArcSelf(self) - } - } - } -} - -impl Clone for UnsafeArc { - fn clone(&self) -> UnsafeArc { - unsafe { - // This barrier might be unnecessary, but I'm not sure... 
- let old_count = (*self.data).count.fetch_add(1, Acquire); - assert!(old_count >= 1); - return UnsafeArc { data: self.data }; - } - } -} - -#[unsafe_destructor] -impl Drop for UnsafeArc{ - fn drop(&mut self) { - unsafe { - // Happens when destructing an unwrapper's handle and from `#[unsafe_no_drop_flag]` - if self.data.is_null() { - return - } - let mut data: ~ArcData = cast::transmute(self.data); - // Must be acquire+release, not just release, to make sure this - // doesn't get reordered to after the unwrapper pointer load. - let old_count = data.count.fetch_sub(1, SeqCst); - assert!(old_count >= 1); - if old_count == 1 { - // Were we really last, or should we hand off to an - // unwrapper? It's safe to not xchg because the unwrapper - // will set the unwrap lock *before* dropping his/her - // reference. In effect, being here means we're the only - // *awake* task with the data. - match data.unwrapper.take(Acquire) { - Some(~(message, response)) => { - // Send 'ready' and wait for a response. - message.send(()); - // Unkillable wait. Message guaranteed to come. - if response.recv() { - // Other task got the data. - cast::forget(data); - } else { - // Other task was killed. drop glue takes over. - } - } - None => { - // drop glue takes over. - } - } - } else { - cast::forget(data); - } - } - } -} - - -/****************************************************************************/ - -pub struct AtomicGuard { - on: bool, -} - -impl Drop for AtomicGuard { - fn drop(&mut self) { - use rt::task::{Task, GreenTask, SchedTask}; - use rt::local::Local; - - if self.on { - unsafe { - let task_opt: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_opt { - Some(t) => { - match (*t).task_type { - GreenTask(_) => (*t).death.allow_deschedule(), - SchedTask => {} - } - } - None => {} - } - } - } - } -} - -/** - * Enables a runtime assertion that no operation while the returned guard is - * live uses scheduler operations (deschedule, recv, spawn, etc). 
This is for - * use with pthread mutexes, which may block the entire scheduler thread, - * rather than just one task, and is hence prone to deadlocks if mixed with - * descheduling. - * - * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section - * synchronization whatsoever. It only makes sense to use for CPU-local issues. - */ -// FIXME(#8140) should not be pub -pub unsafe fn atomic() -> AtomicGuard { - use rt::task::{Task, GreenTask, SchedTask}; - use rt::local::Local; - - let task_opt: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_opt { - Some(t) => { - match (*t).task_type { - GreenTask(_) => { - (*t).death.inhibit_deschedule(); - return AtomicGuard { - on: true, - }; - } - SchedTask => {} - } - } - None => {} - } - - AtomicGuard { - on: false, - } -} +use ops::Drop; +use option::{Option,Some,None}; +use sync::arc::UnsafeArc; +use unstable::mutex::Mutex; pub struct LittleLock { priv l: Mutex, @@ -496,14 +155,6 @@ impl Exclusive { l.wait(); } } - - pub fn unwrap(self) -> T { - let Exclusive { x: x } = self; - // Someday we might need to unkillably unwrap an Exclusive, but not today. - let inner = x.unwrap(); - let ExData { data: user_data, .. } = inner; // will destroy the LittleLock - user_data - } } #[cfg(test)] @@ -514,20 +165,6 @@ mod tests { use task; use mem::size_of; - //#[unsafe_no_drop_flag] FIXME: #9758 - #[ignore] - #[test] - fn test_size() { - assert_eq!(size_of::>(), size_of::<*[int, ..10]>()); - } - - #[test] - fn test_atomic() { - // NB. The whole runtime will abort on an 'atomic-sleep' violation, - // so we can't really test for the converse behaviour. - unsafe { let _ = atomic(); } // oughtn't fail - } - #[test] fn exclusive_new_arc() { unsafe { @@ -570,114 +207,4 @@ mod tests { x.with(|one| assert_eq!(*one, 1)); } } - - #[test] - fn arclike_newN() { - // Tests that the many-refcounts-at-once constructors don't leak. 
- let _ = UnsafeArc::new2(~~"hello"); - let x = UnsafeArc::newN(~~"hello", 0); - assert_eq!(x.len(), 0) - let x = UnsafeArc::newN(~~"hello", 1); - assert_eq!(x.len(), 1) - let x = UnsafeArc::newN(~~"hello", 10); - assert_eq!(x.len(), 10) - } - - #[test] - fn arclike_cloneN() { - // Tests that the many-refcounts-at-once special-clone doesn't leak. - let x = UnsafeArc::new(~~"hello"); - let x = x.cloneN(0); - assert_eq!(x.len(), 0); - let x = UnsafeArc::new(~~"hello"); - let x = x.cloneN(1); - assert_eq!(x.len(), 1); - let x = UnsafeArc::new(~~"hello"); - let x = x.cloneN(10); - assert_eq!(x.len(), 10); - } - - #[test] - fn arclike_unwrap_basic() { - let x = UnsafeArc::new(~~"hello"); - assert!(x.unwrap() == ~~"hello"); - } - - #[test] - fn arclike_try_unwrap() { - let x = UnsafeArc::new(~~"hello"); - assert!(x.try_unwrap().expect_t("try_unwrap failed") == ~~"hello"); - } - - #[test] - fn arclike_try_unwrap_fail() { - let x = UnsafeArc::new(~~"hello"); - let x2 = x.clone(); - let left_x = x.try_unwrap(); - assert!(left_x.is_self()); - drop(left_x); - assert!(x2.try_unwrap().expect_t("try_unwrap none") == ~~"hello"); - } - - #[test] - fn arclike_try_unwrap_unwrap_race() { - // When an unwrap and a try_unwrap race, the unwrapper should always win. - let x = UnsafeArc::new(~~"hello"); - let x2 = x.clone(); - let (p,c) = Chan::new(); - do task::spawn { - c.send(()); - assert!(x2.unwrap() == ~~"hello"); - c.send(()); - } - p.recv(); - task::deschedule(); // Try to make the unwrapper get blocked first. - let left_x = x.try_unwrap(); - assert!(left_x.is_self()); - drop(left_x); - p.recv(); - } - - #[test] - fn exclusive_new_unwrap_basic() { - // Unlike the above, also tests no double-freeing of the LittleLock. 
- let x = Exclusive::new(~~"hello"); - assert!(x.unwrap() == ~~"hello"); - } - - #[test] - fn exclusive_new_unwrap_contended() { - let x = Exclusive::new(~~"hello"); - let x2 = x.clone(); - do task::spawn { - unsafe { x2.with(|_hello| ()); } - task::deschedule(); - } - assert!(x.unwrap() == ~~"hello"); - - // Now try the same thing, but with the child task blocking. - let x = Exclusive::new(~~"hello"); - let x2 = x.clone(); - let mut builder = task::task(); - let res = builder.future_result(); - do builder.spawn { - assert!(x2.unwrap() == ~~"hello"); - } - // Have to get rid of our reference before blocking. - drop(x); - res.recv(); - } - - #[test] #[should_fail] - fn exclusive_new_unwrap_conflict() { - let x = Exclusive::new(~~"hello"); - let x2 = x.clone(); - let mut builder = task::task(); - let res = builder.future_result(); - do builder.spawn { - assert!(x2.unwrap() == ~~"hello"); - } - assert!(x.unwrap() == ~~"hello"); - assert!(res.recv().is_ok()); - } } -- cgit 1.4.1-3-g733a5 From 4538369566b8b51fc8371253aa90f9725547a193 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 17:30:41 -0800 Subject: std: Expose that LocalIo may not always be available It is not the case that all programs will always be able to acquire an instance of the LocalIo borrow, so this commit exposes this limitation by returning Option from LocalIo::borrow(). At the same time, a helper method LocalIo::maybe_raise() has been added in order to encapsulate the functionality of raising on io_error if there is on local I/O available. 
--- src/libstd/io/fs.rs | 66 +++++++++++++++++++------------------------ src/libstd/io/net/addrinfo.rs | 11 +------- src/libstd/io/net/tcp.rs | 25 ++++------------ src/libstd/io/net/udp.rs | 11 ++------ src/libstd/io/net/unix.rs | 22 ++++----------- src/libstd/io/pipe.rs | 18 ++++-------- src/libstd/io/process.rs | 24 ++++++++-------- src/libstd/io/signal.rs | 17 +++++------ src/libstd/io/stdio.rs | 26 +++++++---------- src/libstd/io/timer.rs | 14 ++------- src/libstd/rt/rtio.rs | 60 ++++++++++++++++++++++++--------------- 11 files changed, 118 insertions(+), 176 deletions(-) (limited to 'src/libstd/rt') diff --git a/src/libstd/io/fs.rs b/src/libstd/io/fs.rs index a1465ca7b33..ded1d254f3f 100644 --- a/src/libstd/io/fs.rs +++ b/src/libstd/io/fs.rs @@ -54,7 +54,7 @@ use super::{SeekStyle, Read, Write, Open, IoError, Truncate, use rt::rtio::{RtioFileStream, IoFactory, LocalIo}; use io; use option::{Some, None, Option}; -use result::{Ok, Err, Result}; +use result::{Ok, Err}; use path; use path::{Path, GenericPath}; use vec::{OwnedVector, ImmutableVector}; @@ -75,17 +75,6 @@ pub struct File { priv last_nread: int, } -fn io_raise(f: |io: &mut IoFactory| -> Result) -> Option { - let mut io = LocalIo::borrow(); - match f(io.get()) { - Ok(t) => Some(t), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } -} - impl File { /// Open a file at `path` in the mode specified by the `mode` and `access` /// arguments @@ -131,18 +120,15 @@ impl File { pub fn open_mode(path: &Path, mode: FileMode, access: FileAccess) -> Option { - let mut io = LocalIo::borrow(); - match io.get().fs_open(&path.to_c_str(), mode, access) { - Ok(fd) => Some(File { - path: path.clone(), - fd: fd, - last_nread: -1 - }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.fs_open(&path.to_c_str(), mode, access).map(|fd| { + File { + path: path.clone(), + fd: fd, + last_nread: -1 + } + }) + }) } /// Attempts to open a file in read-only mode. 
This function is equivalent to @@ -242,7 +228,7 @@ impl File { /// directory, the user lacks permissions to remove the file, or if some /// other filesystem-level error occurs. pub fn unlink(path: &Path) { - io_raise(|io| io.fs_unlink(&path.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_unlink(&path.to_c_str())); } /// Given a path, query the file system to get information about a file, @@ -270,7 +256,9 @@ pub fn unlink(path: &Path) { /// requisite permissions to perform a `stat` call on the given path or if /// there is no entry in the filesystem at the provided path. pub fn stat(path: &Path) -> FileStat { - io_raise(|io| io.fs_stat(&path.to_c_str())).unwrap_or_else(dummystat) + LocalIo::maybe_raise(|io| { + io.fs_stat(&path.to_c_str()) + }).unwrap_or_else(dummystat) } fn dummystat() -> FileStat { @@ -306,7 +294,9 @@ fn dummystat() -> FileStat { /// /// See `stat` pub fn lstat(path: &Path) -> FileStat { - io_raise(|io| io.fs_lstat(&path.to_c_str())).unwrap_or_else(dummystat) + LocalIo::maybe_raise(|io| { + io.fs_lstat(&path.to_c_str()) + }).unwrap_or_else(dummystat) } /// Rename a file or directory to a new name. @@ -324,7 +314,7 @@ pub fn lstat(path: &Path) -> FileStat { /// the process lacks permissions to view the contents, or if some other /// intermittent I/O error occurs. pub fn rename(from: &Path, to: &Path) { - io_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_rename(&from.to_c_str(), &to.to_c_str())); } /// Copies the contents of one file to another. This function will also @@ -395,7 +385,7 @@ pub fn copy(from: &Path, to: &Path) { /// condition. Some possible error situations are not having the permission to /// change the attributes of a file or the file not existing. 
pub fn chmod(path: &Path, mode: io::FilePermission) { - io_raise(|io| io.fs_chmod(&path.to_c_str(), mode)); + LocalIo::maybe_raise(|io| io.fs_chmod(&path.to_c_str(), mode)); } /// Change the user and group owners of a file at the specified path. @@ -404,7 +394,7 @@ pub fn chmod(path: &Path, mode: io::FilePermission) { /// /// This function will raise on the `io_error` condition on failure. pub fn chown(path: &Path, uid: int, gid: int) { - io_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid)); + LocalIo::maybe_raise(|io| io.fs_chown(&path.to_c_str(), uid, gid)); } /// Creates a new hard link on the filesystem. The `dst` path will be a @@ -415,7 +405,7 @@ pub fn chown(path: &Path, uid: int, gid: int) { /// /// This function will raise on the `io_error` condition on failure. pub fn link(src: &Path, dst: &Path) { - io_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_link(&src.to_c_str(), &dst.to_c_str())); } /// Creates a new symbolic link on the filesystem. The `dst` path will be a @@ -425,7 +415,7 @@ pub fn link(src: &Path, dst: &Path) { /// /// This function will raise on the `io_error` condition on failure. pub fn symlink(src: &Path, dst: &Path) { - io_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_symlink(&src.to_c_str(), &dst.to_c_str())); } /// Reads a symlink, returning the file that the symlink points to. @@ -436,7 +426,7 @@ pub fn symlink(src: &Path, dst: &Path) { /// conditions include reading a file that does not exist or reading a file /// which is not a symlink. pub fn readlink(path: &Path) -> Option { - io_raise(|io| io.fs_readlink(&path.to_c_str())) + LocalIo::maybe_raise(|io| io.fs_readlink(&path.to_c_str())) } /// Create a new, empty directory at the provided path @@ -456,7 +446,7 @@ pub fn readlink(path: &Path) -> Option { /// to make a new directory at the provided path, or if the directory already /// exists. 
pub fn mkdir(path: &Path, mode: FilePermission) { - io_raise(|io| io.fs_mkdir(&path.to_c_str(), mode)); + LocalIo::maybe_raise(|io| io.fs_mkdir(&path.to_c_str(), mode)); } /// Remove an existing, empty directory @@ -475,7 +465,7 @@ pub fn mkdir(path: &Path, mode: FilePermission) { /// to remove the directory at the provided path, or if the directory isn't /// empty. pub fn rmdir(path: &Path) { - io_raise(|io| io.fs_rmdir(&path.to_c_str())); + LocalIo::maybe_raise(|io| io.fs_rmdir(&path.to_c_str())); } /// Retrieve a vector containing all entries within a provided directory @@ -502,7 +492,9 @@ pub fn rmdir(path: &Path) { /// the process lacks permissions to view the contents or if the `path` points /// at a non-directory file pub fn readdir(path: &Path) -> ~[Path] { - io_raise(|io| io.fs_readdir(&path.to_c_str(), 0)).unwrap_or_else(|| ~[]) + LocalIo::maybe_raise(|io| { + io.fs_readdir(&path.to_c_str(), 0) + }).unwrap_or_else(|| ~[]) } /// Returns an iterator which will recursively walk the directory structure @@ -583,7 +575,7 @@ pub fn rmdir_recursive(path: &Path) { /// happens. // FIXME(#10301) these arguments should not be u64 pub fn change_file_times(path: &Path, atime: u64, mtime: u64) { - io_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime)); + LocalIo::maybe_raise(|io| io.fs_utime(&path.to_c_str(), atime, mtime)); } impl Reader for File { diff --git a/src/libstd/io/net/addrinfo.rs b/src/libstd/io/net/addrinfo.rs index 7df4fdd2266..6d968de209c 100644 --- a/src/libstd/io/net/addrinfo.rs +++ b/src/libstd/io/net/addrinfo.rs @@ -18,8 +18,6 @@ getaddrinfo() */ use option::{Option, Some, None}; -use result::{Ok, Err}; -use io::{io_error}; use io::net::ip::{SocketAddr, IpAddr}; use rt::rtio::{IoFactory, LocalIo}; use vec::ImmutableVector; @@ -97,14 +95,7 @@ pub fn get_host_addresses(host: &str) -> Option<~[IpAddr]> { /// consumption just yet. 
fn lookup(hostname: Option<&str>, servname: Option<&str>, hint: Option) -> Option<~[Info]> { - let mut io = LocalIo::borrow(); - match io.get().get_host_addresses(hostname, servname, hint) { - Ok(i) => Some(i), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| io.get_host_addresses(hostname, servname, hint)) } #[cfg(test)] diff --git a/src/libstd/io/net/tcp.rs b/src/libstd/io/net/tcp.rs index db51653d665..bd7d8bacb38 100644 --- a/src/libstd/io/net/tcp.rs +++ b/src/libstd/io/net/tcp.rs @@ -26,17 +26,9 @@ impl TcpStream { } pub fn connect(addr: SocketAddr) -> Option { - let result = { - let mut io = LocalIo::borrow(); - io.get().tcp_connect(addr) - }; - match result { - Ok(s) => Some(TcpStream::new(s)), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.tcp_connect(addr).map(TcpStream::new) + }) } pub fn peer_name(&mut self) -> Option { @@ -94,14 +86,9 @@ pub struct TcpListener { impl TcpListener { pub fn bind(addr: SocketAddr) -> Option { - let mut io = LocalIo::borrow(); - match io.get().tcp_bind(addr) { - Ok(l) => Some(TcpListener { obj: l }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.tcp_bind(addr).map(|l| TcpListener { obj: l }) + }) } pub fn socket_name(&mut self) -> Option { diff --git a/src/libstd/io/net/udp.rs b/src/libstd/io/net/udp.rs index 0a277ee4347..159823ba2b5 100644 --- a/src/libstd/io/net/udp.rs +++ b/src/libstd/io/net/udp.rs @@ -21,14 +21,9 @@ pub struct UdpSocket { impl UdpSocket { pub fn bind(addr: SocketAddr) -> Option { - let mut io = LocalIo::borrow(); - match io.get().udp_bind(addr) { - Ok(s) => Some(UdpSocket { obj: s }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.udp_bind(addr).map(|s| UdpSocket { obj: s }) + }) } pub fn recvfrom(&mut self, buf: &mut [u8]) -> Option<(uint, SocketAddr)> { diff --git a/src/libstd/io/net/unix.rs 
b/src/libstd/io/net/unix.rs index d8abd1fe50d..8fd256a22f9 100644 --- a/src/libstd/io/net/unix.rs +++ b/src/libstd/io/net/unix.rs @@ -59,14 +59,9 @@ impl UnixStream { /// stream.write([1, 2, 3]); /// pub fn connect(path: &P) -> Option { - let mut io = LocalIo::borrow(); - match io.get().unix_connect(&path.to_c_str()) { - Ok(s) => Some(UnixStream::new(s)), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.unix_connect(&path.to_c_str()).map(UnixStream::new) + }) } } @@ -107,14 +102,9 @@ impl UnixListener { /// } /// pub fn bind(path: &P) -> Option { - let mut io = LocalIo::borrow(); - match io.get().unix_bind(&path.to_c_str()) { - Ok(s) => Some(UnixListener{ obj: s }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| { + io.unix_bind(&path.to_c_str()).map(|s| UnixListener { obj: s }) + }) } } diff --git a/src/libstd/io/pipe.rs b/src/libstd/io/pipe.rs index 252575ee445..2349c64a84b 100644 --- a/src/libstd/io/pipe.rs +++ b/src/libstd/io/pipe.rs @@ -14,10 +14,9 @@ //! enough so that pipes can be created to child processes. use prelude::*; -use super::{Reader, Writer}; use io::{io_error, EndOfFile}; -use io::native::file; -use rt::rtio::{LocalIo, RtioPipe}; +use libc; +use rt::rtio::{RtioPipe, LocalIo}; pub struct PipeStream { priv obj: ~RtioPipe, @@ -43,15 +42,10 @@ impl PipeStream { /// /// If the pipe cannot be created, an error will be raised on the /// `io_error` condition. 
- pub fn open(fd: file::fd_t) -> Option { - let mut io = LocalIo::borrow(); - match io.get().pipe_open(fd) { - Ok(obj) => Some(PipeStream { obj: obj }), - Err(e) => { - io_error::cond.raise(e); - None - } - } + pub fn open(fd: libc::c_int) -> Option { + LocalIo::maybe_raise(|io| { + io.pipe_open(fd).map(|obj| PipeStream { obj: obj }) + }) } pub fn new(inner: ~RtioPipe) -> PipeStream { diff --git a/src/libstd/io/process.rs b/src/libstd/io/process.rs index 001faa1ecaf..bbb2a7ef398 100644 --- a/src/libstd/io/process.rs +++ b/src/libstd/io/process.rs @@ -119,19 +119,17 @@ impl Process { /// Creates a new pipe initialized, but not bound to any particular /// source/destination pub fn new(config: ProcessConfig) -> Option { - let mut io = LocalIo::borrow(); - match io.get().spawn(config) { - Ok((p, io)) => Some(Process{ - handle: p, - io: io.move_iter().map(|p| - p.map(|p| io::PipeStream::new(p)) - ).collect() - }), - Err(ioerr) => { - io_error::cond.raise(ioerr); - None - } - } + let mut config = Some(config); + LocalIo::maybe_raise(|io| { + io.spawn(config.take_unwrap()).map(|(p, io)| { + Process { + handle: p, + io: io.move_iter().map(|p| { + p.map(|p| io::PipeStream::new(p)) + }).collect() + } + }) + }) } /// Returns the process id of this child process diff --git a/src/libstd/io/signal.rs b/src/libstd/io/signal.rs index 00d84e22c25..4cde35796a6 100644 --- a/src/libstd/io/signal.rs +++ b/src/libstd/io/signal.rs @@ -23,8 +23,7 @@ use clone::Clone; use comm::{Port, SharedChan}; use container::{Map, MutableMap}; use hashmap; -use io::io_error; -use result::{Err, Ok}; +use option::{Some, None}; use rt::rtio::{IoFactory, LocalIo, RtioSignal}; #[repr(int)] @@ -122,16 +121,14 @@ impl Listener { if self.handles.contains_key(&signum) { return true; // self is already listening to signum, so succeed } - let mut io = LocalIo::borrow(); - match io.get().signal(signum, self.chan.clone()) { - Ok(w) => { - self.handles.insert(signum, w); + match LocalIo::maybe_raise(|io| { + 
io.signal(signum, self.chan.clone()) + }) { + Some(handle) => { + self.handles.insert(signum, handle); true - }, - Err(ioerr) => { - io_error::cond.raise(ioerr); - false } + None => false } } diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 41337075aa9..0adb83d2015 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -27,13 +27,14 @@ out.write(bytes!("Hello, world!")); */ use fmt; +use io::buffered::LineBufferedWriter; +use io::{Reader, Writer, io_error, IoError, OtherIoError, + standard_error, EndOfFile}; use libc; use option::{Option, Some, None}; use result::{Ok, Err}; -use io::buffered::LineBufferedWriter; use rt::rtio::{DontClose, IoFactory, LocalIo, RtioFileStream, RtioTTY}; -use super::{Reader, Writer, io_error, IoError, OtherIoError, - standard_error, EndOfFile}; +use vec; // And so begins the tale of acquiring a uv handle to a stdio stream on all // platforms in all situations. Our story begins by splitting the world into two @@ -69,19 +70,12 @@ enum StdSource { } fn src(fd: libc::c_int, readable: bool, f: |StdSource| -> T) -> T { - let mut io = LocalIo::borrow(); - match io.get().tty_open(fd, readable) { - Ok(tty) => f(TTY(tty)), - Err(_) => { - // It's not really that desirable if these handles are closed - // synchronously, and because they're squirreled away in a task - // structure the destructors will be run when the task is - // attempted to get destroyed. This means that if we run a - // synchronous destructor we'll attempt to do some scheduling - // operations which will just result in sadness. - f(File(io.get().fs_from_raw_fd(fd, DontClose))) - } - } + LocalIo::maybe_raise(|io| { + Ok(match io.tty_open(fd, readable) { + Ok(tty) => f(TTY(tty)), + Err(_) => f(File(io.fs_from_raw_fd(fd, DontClose))), + }) + }).unwrap() } /// Creates a new non-blocking handle to the stdin of the current process. 
diff --git a/src/libstd/io/timer.rs b/src/libstd/io/timer.rs index c86e1a1890b..7c9aa28bfe9 100644 --- a/src/libstd/io/timer.rs +++ b/src/libstd/io/timer.rs @@ -39,9 +39,7 @@ loop { */ use comm::Port; -use option::{Option, Some, None}; -use result::{Ok, Err}; -use io::io_error; +use option::Option; use rt::rtio::{IoFactory, LocalIo, RtioTimer}; pub struct Timer { @@ -60,15 +58,7 @@ impl Timer { /// for a number of milliseconds, or to possibly create channels which will /// get notified after an amount of time has passed. pub fn new() -> Option { - let mut io = LocalIo::borrow(); - match io.get().timer_init() { - Ok(t) => Some(Timer { obj: t }), - Err(ioerr) => { - debug!("Timer::init: failed to init: {:?}", ioerr); - io_error::cond.raise(ioerr); - None - } - } + LocalIo::maybe_raise(|io| io.timer_init().map(|t| Timer { obj: t })) } /// Blocks the current task for `msecs` milliseconds. diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index b54231421e3..7207c1a8134 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -93,36 +93,50 @@ impl<'a> Drop for LocalIo<'a> { impl<'a> LocalIo<'a> { /// Returns the local I/O: either the local scheduler's I/O services or /// the native I/O services. - pub fn borrow() -> LocalIo { - use rt::sched::Scheduler; - use rt::local::Local; + pub fn borrow() -> Option { + // XXX: This is currently very unsafely implemented. We don't actually + // *take* the local I/O so there's a very real possibility that we + // can have two borrows at once. Currently there is not a clear way + // to actually borrow the local I/O factory safely because even if + // ownership were transferred down to the functions that the I/O + // factory implements it's just too much of a pain to know when to + // relinquish ownership back into the local task (but that would be + // the safe way of implementing this function). 
+ // + // In order to get around this, we just transmute a copy out of the task + // in order to have what is likely a static lifetime (bad). + let mut t: ~Task = Local::take(); + let ret = t.local_io().map(|t| { + unsafe { cast::transmute_copy(&t) } + }); + Local::put(t); + return ret; + } - unsafe { - // First, attempt to use the local scheduler's I/O services - let sched: Option<*mut Scheduler> = Local::try_unsafe_borrow(); - match sched { - Some(sched) => { - match (*sched).event_loop.io() { - Some(factory) => { - return LocalIo { - factory: factory, - } - } - None => {} + pub fn maybe_raise(f: |io: &mut IoFactory| -> Result) + -> Option + { + match LocalIo::borrow() { + None => { + io::io_error::cond.raise(io::standard_error(io::IoUnavailable)); + None + } + Some(mut io) => { + match f(io.get()) { + Ok(t) => Some(t), + Err(ioerr) => { + io::io_error::cond.raise(ioerr); + None } } - None => {} - } - // If we don't have a scheduler or the scheduler doesn't have I/O - // services, then fall back to the native I/O services. - let native_io: &'static mut native::IoFactory = - &mut NATIVE_IO_FACTORY; - LocalIo { - factory: native_io as &mut IoFactory:'static } } } + pub fn new<'a>(io: &'a mut IoFactory) -> LocalIo<'a> { + LocalIo { factory: io } + } + /// Returns the underlying I/O factory as a trait reference. #[inline] pub fn get<'a>(&'a mut self) -> &'a mut IoFactory { -- cgit 1.4.1-3-g733a5 From dd19785f963fd1045e53447add17ab36ca41fc79 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 17:32:35 -0800 Subject: std: Handle prints with literally no context Printing is an incredibly useful debugging utility, and it's not much help if your debugging prints just trigger an obscure abort when you need them most. In order to handle this case, forcibly fall back to a libc::write implementation of printing whenever a local task is not available. Note that this is *not* a 1:1 fallback. 
All 1:1 rust tasks will still have a local Task that it can go through (and stdio will be created through the local IO factory), this is only a fallback for "no context" rust code (such as that setting up the context). --- src/libstd/io/stdio.rs | 12 +++++++++++- src/libstd/rt/util.rs | 17 ++++++++++++++--- 2 files changed, 25 insertions(+), 4 deletions(-) (limited to 'src/libstd/rt') diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 0adb83d2015..88047aecda2 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -132,7 +132,17 @@ fn with_task_stdout(f: |&mut Writer|) { } None => { - let mut io = stdout(); + struct Stdout; + impl Writer for Stdout { + fn write(&mut self, data: &[u8]) { + unsafe { + libc::write(libc::STDOUT_FILENO, + vec::raw::to_ptr(data) as *libc::c_void, + data.len() as libc::size_t); + } + } + } + let mut io = Stdout; f(&mut io as &mut Writer); } } diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 93721986f3c..2f3e5be39e6 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -68,11 +68,22 @@ pub fn default_sched_threads() -> uint { } pub fn dumb_println(args: &fmt::Arguments) { - use io::native::file::FileDesc; use io; use libc; - let mut out = FileDesc::new(libc::STDERR_FILENO, false); - fmt::writeln(&mut out as &mut io::Writer, args); + use vec; + + struct Stderr; + impl io::Writer for Stderr { + fn write(&mut self, data: &[u8]) { + unsafe { + libc::write(libc::STDERR_FILENO, + vec::raw::to_ptr(data) as *libc::c_void, + data.len() as libc::size_t); + } + } + } + let mut w = Stderr; + fmt::writeln(&mut w as &mut io::Writer, args); } pub fn abort(msg: &str) -> ! { -- cgit 1.4.1-3-g733a5 From 51abdee5f1ad932671350fdd8a7911fe144d08b8 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 18:01:59 -0800 Subject: green: Rip the bandaid off, introduce libgreen This extracts everything related to green scheduling from libstd and introduces a new libgreen crate. 
This mostly involves deleting most of std::rt and moving it to libgreen. Along with the movement of code, this commit rearchitects many functions in the scheduler in order to adapt to the fact that Local::take now *only* works on a Task, not a scheduler. This mostly just involved threading the current green task through in a few locations, but there were one or two spots where things got hairy. There are a few repercussions of this commit: * tube/rc have been removed (the runtime implementation of rc) * There is no longer a "single threaded" spawning mode for tasks. This is now encompassed by 1:1 scheduling + communication. Convenience methods have been introduced that are specific to libgreen to assist in the spawning of pools of schedulers. --- src/libextra/task_pool.rs | 18 +- src/libgreen/basic.rs | 225 +++++++ src/libgreen/context.rs | 284 +++++++++ src/libgreen/coroutine.rs | 62 ++ src/libgreen/lib.rs | 351 +++++++++++ src/libgreen/macros.rs | 130 ++++ src/libgreen/sched.rs | 1404 +++++++++++++++++++++++++++++++++++++++++ src/libgreen/sleeper_list.rs | 46 ++ src/libgreen/stack.rs | 75 +++ src/libgreen/task.rs | 505 +++++++++++++++ src/libstd/rt/basic.rs | 230 ------- src/libstd/rt/borrowck.rs | 11 +- src/libstd/rt/context.rs | 463 -------------- src/libstd/rt/env.rs | 2 +- src/libstd/rt/kill.rs | 318 ---------- src/libstd/rt/local.rs | 79 +-- src/libstd/rt/mod.rs | 64 +- src/libstd/rt/rc.rs | 139 ---- src/libstd/rt/rtio.rs | 11 +- src/libstd/rt/sched.rs | 1395 ---------------------------------------- src/libstd/rt/sleeper_list.rs | 47 -- src/libstd/rt/stack.rs | 78 --- src/libstd/rt/task.rs | 673 +++++++------------- src/libstd/rt/thread.rs | 6 + src/libstd/rt/tube.rs | 170 ----- src/libstd/rt/unwind.rs | 72 ++- src/libstd/rt/util.rs | 11 - src/libstd/run.rs | 8 +- src/libstd/task.rs | 745 ++++++++++++++++++++++ src/libstd/task/mod.rs | 799 ----------------------- src/libstd/task/spawn.rs | 233 ------- src/libstd/unstable/lang.rs | 16 +- 
src/libstd/unstable/mod.rs | 1 + src/libsyntax/ext/expand.rs | 4 +- 34 files changed, 4165 insertions(+), 4510 deletions(-) create mode 100644 src/libgreen/basic.rs create mode 100644 src/libgreen/context.rs create mode 100644 src/libgreen/coroutine.rs create mode 100644 src/libgreen/lib.rs create mode 100644 src/libgreen/macros.rs create mode 100644 src/libgreen/sched.rs create mode 100644 src/libgreen/sleeper_list.rs create mode 100644 src/libgreen/stack.rs create mode 100644 src/libgreen/task.rs delete mode 100644 src/libstd/rt/basic.rs delete mode 100644 src/libstd/rt/context.rs delete mode 100644 src/libstd/rt/kill.rs delete mode 100644 src/libstd/rt/rc.rs delete mode 100644 src/libstd/rt/sched.rs delete mode 100644 src/libstd/rt/sleeper_list.rs delete mode 100644 src/libstd/rt/stack.rs delete mode 100644 src/libstd/rt/tube.rs create mode 100644 src/libstd/task.rs delete mode 100644 src/libstd/task/mod.rs delete mode 100644 src/libstd/task/spawn.rs (limited to 'src/libstd/rt') diff --git a/src/libextra/task_pool.rs b/src/libextra/task_pool.rs index f0c9833adf8..649a9a06644 100644 --- a/src/libextra/task_pool.rs +++ b/src/libextra/task_pool.rs @@ -14,7 +14,6 @@ /// parallelism. -use std::task::SchedMode; use std::task; use std::vec; @@ -46,7 +45,6 @@ impl TaskPool { /// returns a function which, given the index of the task, should return /// local data to be kept around in that task. pub fn new(n_tasks: uint, - opt_sched_mode: Option, init_fn_factory: || -> proc(uint) -> T) -> TaskPool { assert!(n_tasks >= 1); @@ -65,18 +63,8 @@ impl TaskPool { } }; - // Start the task. - match opt_sched_mode { - None => { - // Run on this scheduler. - task::spawn(task_body); - } - Some(sched_mode) => { - let mut task = task::task(); - task.sched_mode(sched_mode); - task.spawn(task_body); - } - } + // Run on this scheduler. 
+ task::spawn(task_body); chan }); @@ -99,7 +87,7 @@ fn test_task_pool() { let g: proc(uint) -> uint = proc(i) i; g }; - let mut pool = TaskPool::new(4, Some(SingleThreaded), f); + let mut pool = TaskPool::new(4, f); 8.times(|| { pool.execute(proc(i) println!("Hello from thread {}!", *i)); }) diff --git a/src/libgreen/basic.rs b/src/libgreen/basic.rs new file mode 100644 index 00000000000..6140da08b68 --- /dev/null +++ b/src/libgreen/basic.rs @@ -0,0 +1,225 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! This is a basic event loop implementation not meant for any "real purposes" +//! other than testing the scheduler and proving that it's possible to have a +//! pluggable event loop. +//! +//! This implementation is also used as the fallback implementation of an event +//! loop if no other one is provided (and M:N scheduling is desired). + +use std::cast; +use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback, + Callback}; +use std::unstable::sync::Exclusive; +use std::util; + +/// This is the only exported function from this module. 
+pub fn event_loop() -> ~EventLoop { + ~BasicLoop::new() as ~EventLoop +} + +struct BasicLoop { + work: ~[proc()], // pending work + idle: Option<*mut BasicPausable>, // only one is allowed + remotes: ~[(uint, ~Callback)], + next_remote: uint, + messages: Exclusive<~[Message]>, +} + +enum Message { RunRemote(uint), RemoveRemote(uint) } + +impl BasicLoop { + fn new() -> BasicLoop { + BasicLoop { + work: ~[], + idle: None, + next_remote: 0, + remotes: ~[], + messages: Exclusive::new(~[]), + } + } + + /// Process everything in the work queue (continually) + fn work(&mut self) { + while self.work.len() > 0 { + for work in util::replace(&mut self.work, ~[]).move_iter() { + work(); + } + } + } + + fn remote_work(&mut self) { + let messages = unsafe { + self.messages.with(|messages| { + if messages.len() > 0 { + Some(util::replace(messages, ~[])) + } else { + None + } + }) + }; + let messages = match messages { + Some(m) => m, None => return + }; + for message in messages.iter() { + self.message(*message); + } + } + + fn message(&mut self, message: Message) { + match message { + RunRemote(i) => { + match self.remotes.mut_iter().find(|& &(id, _)| id == i) { + Some(&(_, ref mut f)) => f.call(), + None => unreachable!() + } + } + RemoveRemote(i) => { + match self.remotes.iter().position(|&(id, _)| id == i) { + Some(i) => { self.remotes.remove(i); } + None => unreachable!() + } + } + } + } + + /// Run the idle callback if one is registered + fn idle(&mut self) { + unsafe { + match self.idle { + Some(idle) => { + if (*idle).active { + (*idle).work.call(); + } + } + None => {} + } + } + } + + fn has_idle(&self) -> bool { + unsafe { self.idle.is_some() && (**self.idle.get_ref()).active } + } +} + +impl EventLoop for BasicLoop { + fn run(&mut self) { + // Not exactly efficient, but it gets the job done. 
+ while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() { + + self.work(); + self.remote_work(); + + if self.has_idle() { + self.idle(); + continue + } + + unsafe { + // We block here if we have no messages to process and we may + // receive a message at a later date + self.messages.hold_and_wait(|messages| { + self.remotes.len() > 0 && + messages.len() == 0 && + self.work.len() == 0 + }) + } + } + } + + fn callback(&mut self, f: proc()) { + self.work.push(f); + } + + // XXX: Seems like a really weird requirement to have an event loop provide. + fn pausable_idle_callback(&mut self, cb: ~Callback) -> ~PausableIdleCallback { + let callback = ~BasicPausable::new(self, cb); + rtassert!(self.idle.is_none()); + unsafe { + let cb_ptr: &*mut BasicPausable = cast::transmute(&callback); + self.idle = Some(*cb_ptr); + } + return callback as ~PausableIdleCallback; + } + + fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { + let id = self.next_remote; + self.next_remote += 1; + self.remotes.push((id, f)); + ~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback + } + + fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { None } +} + +struct BasicRemote { + queue: Exclusive<~[Message]>, + id: uint, +} + +impl BasicRemote { + fn new(queue: Exclusive<~[Message]>, id: uint) -> BasicRemote { + BasicRemote { queue: queue, id: id } + } +} + +impl RemoteCallback for BasicRemote { + fn fire(&mut self) { + unsafe { + self.queue.hold_and_signal(|queue| { + queue.push(RunRemote(self.id)); + }) + } + } +} + +impl Drop for BasicRemote { + fn drop(&mut self) { + unsafe { + self.queue.hold_and_signal(|queue| { + queue.push(RemoveRemote(self.id)); + }) + } + } +} + +struct BasicPausable { + eloop: *mut BasicLoop, + work: ~Callback, + active: bool, +} + +impl BasicPausable { + fn new(eloop: &mut BasicLoop, cb: ~Callback) -> BasicPausable { + BasicPausable { + active: false, + work: cb, + eloop: eloop, + } + } +} + +impl PausableIdleCallback for 
BasicPausable { + fn pause(&mut self) { + self.active = false; + } + fn resume(&mut self) { + self.active = true; + } +} + +impl Drop for BasicPausable { + fn drop(&mut self) { + unsafe { + (*self.eloop).idle = None; + } + } +} diff --git a/src/libgreen/context.rs b/src/libgreen/context.rs new file mode 100644 index 00000000000..24e35627ddd --- /dev/null +++ b/src/libgreen/context.rs @@ -0,0 +1,284 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::libc::c_void; +use std::uint; +use std::cast::{transmute, transmute_mut_unsafe, + transmute_region, transmute_mut_region}; +use std::unstable::stack; + +use stack::StackSegment; + +// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing +// SSE regs. It would be marginally better not to do this. In C++ we +// use an attribute on a struct. +// FIXME #7761: It would be nice to define regs as `~Option` since +// the registers are sometimes empty, but the discriminant would +// then misalign the regs again. 
+pub struct Context { + /// The context entry point, saved here for later destruction + priv start: ~Option, + /// Hold the registers while the task or scheduler is suspended + priv regs: ~Registers, + /// Lower bound and upper bound for the stack + priv stack_bounds: Option<(uint, uint)>, +} + +impl Context { + pub fn empty() -> Context { + Context { + start: ~None, + regs: new_regs(), + stack_bounds: None, + } + } + + /// Create a new context that will resume execution by running proc() + pub fn new(start: proc(), stack: &mut StackSegment) -> Context { + // The C-ABI function that is the task entry point + extern fn task_start_wrapper(f: &mut Option) { + f.take_unwrap()() + } + + let sp: *uint = stack.end(); + let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) }; + // Save and then immediately load the current context, + // which we will then modify to call the given function when restored + let mut regs = new_regs(); + unsafe { + rust_swap_registers(transmute_mut_region(&mut *regs), + transmute_region(&*regs)); + }; + + // FIXME #7767: Putting main into a ~ so it's a thin pointer and can + // be passed to the spawn function. Another unfortunate + // allocation + let box = ~Some(start); + initialize_call_frame(&mut *regs, + task_start_wrapper as *c_void, + unsafe { transmute(&*box) }, + sp); + + // Scheduler tasks don't have a stack in the "we allocated it" sense, + // but rather they run on pthreads stacks. We have complete control over + // them in terms of the code running on them (and hopefully they don't + // overflow). Additionally, their coroutine stacks are listed as being + // zero-length, so that's how we detect what's what here. 
+ let stack_base: *uint = stack.start(); + let bounds = if sp as uint == stack_base as uint { + None + } else { + Some((stack_base as uint, sp as uint)) + }; + return Context { + start: box, + regs: regs, + stack_bounds: bounds, + } + } + + /* Switch contexts + + Suspend the current execution context and resume another by + saving the registers values of the executing thread to a Context + then loading the registers from a previously saved Context. + */ + pub fn swap(out_context: &mut Context, in_context: &Context) { + rtdebug!("swapping contexts"); + let out_regs: &mut Registers = match out_context { + &Context { regs: ~ref mut r, .. } => r + }; + let in_regs: &Registers = match in_context { + &Context { regs: ~ref r, .. } => r + }; + + rtdebug!("noting the stack limit and doing raw swap"); + + unsafe { + // Right before we switch to the new context, set the new context's + // stack limit in the OS-specified TLS slot. This also means that + // we cannot call any more rust functions after record_stack_bounds + // returns because they would all likely fail due to the limit being + // invalid for the current task. Lucky for us `rust_swap_registers` + // is a C function so we don't have to worry about that! + match in_context.stack_bounds { + Some((lo, hi)) => stack::record_stack_bounds(lo, hi), + // If we're going back to one of the original contexts or + // something that's possibly not a "normal task", then reset + // the stack limit to 0 to make morestack never fail + None => stack::record_stack_bounds(0, uint::max_value), + } + rust_swap_registers(out_regs, in_regs) + } + } +} + +#[link(name = "rustrt", kind = "static")] +extern { + fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers); +} + +// Register contexts used in various architectures +// +// These structures all represent a context of one task throughout its +// execution. Each struct is a representation of the architecture's register +// set. 
When swapping between tasks, these register sets are used to save off +// the current registers into one struct, and load them all from another. +// +// Note that this is only used for context switching, which means that some of +// the registers may go unused. For example, for architectures with +// callee/caller saved registers, the context will only reflect the callee-saved +// registers. This is because the caller saved registers are already stored +// elsewhere on the stack (if it was necessary anyway). +// +// Additionally, there may be fields on various architectures which are unused +// entirely because they only reflect what is theoretically possible for a +// "complete register set" to show, but user-space cannot alter these registers. +// An example of this would be the segment selectors for x86. +// +// These structures/functions are roughly in-sync with the source files inside +// of src/rt/arch/$arch. The only currently used function from those folders is +// the `rust_swap_registers` function, but that's only because for now segmented +// stacks are disabled. 
+ +#[cfg(target_arch = "x86")] +struct Registers { + eax: u32, ebx: u32, ecx: u32, edx: u32, + ebp: u32, esi: u32, edi: u32, esp: u32, + cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16, + eflags: u32, eip: u32 +} + +#[cfg(target_arch = "x86")] +fn new_regs() -> ~Registers { + ~Registers { + eax: 0, ebx: 0, ecx: 0, edx: 0, + ebp: 0, esi: 0, edi: 0, esp: 0, + cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0, + eflags: 0, eip: 0 + } +} + +#[cfg(target_arch = "x86")] +fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, + sp: *mut uint) { + + let sp = align_down(sp); + let sp = mut_offset(sp, -4); + + unsafe { *sp = arg as uint }; + let sp = mut_offset(sp, -1); + unsafe { *sp = 0 }; // The final return address + + regs.esp = sp as u32; + regs.eip = fptr as u32; + + // Last base pointer on the stack is 0 + regs.ebp = 0; +} + +// windows requires saving more registers (both general and XMM), so the windows +// register context must be larger. +#[cfg(windows, target_arch = "x86_64")] +type Registers = [uint, ..34]; +#[cfg(not(windows), target_arch = "x86_64")] +type Registers = [uint, ..22]; + +#[cfg(windows, target_arch = "x86_64")] +fn new_regs() -> ~Registers { ~([0, .. 34]) } +#[cfg(not(windows), target_arch = "x86_64")] +fn new_regs() -> ~Registers { ~([0, .. 22]) } + +#[cfg(target_arch = "x86_64")] +fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, + sp: *mut uint) { + + // Redefinitions from rt/arch/x86_64/regs.h + static RUSTRT_ARG0: uint = 3; + static RUSTRT_RSP: uint = 1; + static RUSTRT_IP: uint = 8; + static RUSTRT_RBP: uint = 2; + + let sp = align_down(sp); + let sp = mut_offset(sp, -1); + + // The final return address. 
0 indicates the bottom of the stack + unsafe { *sp = 0; } + + rtdebug!("creating call frame"); + rtdebug!("fptr {}", fptr); + rtdebug!("arg {}", arg); + rtdebug!("sp {}", sp); + + regs[RUSTRT_ARG0] = arg as uint; + regs[RUSTRT_RSP] = sp as uint; + regs[RUSTRT_IP] = fptr as uint; + + // Last base pointer on the stack should be 0 + regs[RUSTRT_RBP] = 0; +} + +#[cfg(target_arch = "arm")] +type Registers = [uint, ..32]; + +#[cfg(target_arch = "arm")] +fn new_regs() -> ~Registers { ~([0, .. 32]) } + +#[cfg(target_arch = "arm")] +fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, + sp: *mut uint) { + let sp = align_down(sp); + // sp of arm eabi is 8-byte aligned + let sp = mut_offset(sp, -2); + + // The final return address. 0 indicates the bottom of the stack + unsafe { *sp = 0; } + + regs[0] = arg as uint; // r0 + regs[13] = sp as uint; // #53 sp, r13 + regs[14] = fptr as uint; // #60 pc, r15 --> lr +} + +#[cfg(target_arch = "mips")] +type Registers = [uint, ..32]; + +#[cfg(target_arch = "mips")] +fn new_regs() -> ~Registers { ~([0, .. 32]) } + +#[cfg(target_arch = "mips")] +fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, + sp: *mut uint) { + let sp = align_down(sp); + // sp of mips o32 is 8-byte aligned + let sp = mut_offset(sp, -2); + + // The final return address. 
0 indicates the bottom of the stack + unsafe { *sp = 0; } + + regs[4] = arg as uint; + regs[29] = sp as uint; + regs[25] = fptr as uint; + regs[31] = fptr as uint; +} + +fn align_down(sp: *mut uint) -> *mut uint { + unsafe { + let sp: uint = transmute(sp); + let sp = sp & !(16 - 1); + transmute::(sp) + } +} + +// ptr::mut_offset is positive ints only +#[inline] +pub fn mut_offset(ptr: *mut T, count: int) -> *mut T { + use std::mem::size_of; + (ptr as int + count * (size_of::() as int)) as *mut T +} diff --git a/src/libgreen/coroutine.rs b/src/libgreen/coroutine.rs new file mode 100644 index 00000000000..7bc5d0accfe --- /dev/null +++ b/src/libgreen/coroutine.rs @@ -0,0 +1,62 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// Coroutines represent nothing more than a context and a stack +// segment. + +use std::rt::env; + +use context::Context; +use stack::{StackPool, StackSegment}; + +/// A coroutine is nothing more than a (register context, stack) pair. +pub struct Coroutine { + /// The segment of stack on which the task is currently running or + /// if the task is blocked, on which the task will resume + /// execution. + /// + /// Servo needs this to be public in order to tell SpiderMonkey + /// about the stack bounds. + current_stack_segment: StackSegment, + + /// Always valid if the task is alive and not running. 
+ saved_context: Context +} + +impl Coroutine { + pub fn new(stack_pool: &mut StackPool, + stack_size: Option, + start: proc()) + -> Coroutine { + let stack_size = match stack_size { + Some(size) => size, + None => env::min_stack() + }; + let mut stack = stack_pool.take_segment(stack_size); + let initial_context = Context::new(start, &mut stack); + Coroutine { + current_stack_segment: stack, + saved_context: initial_context + } + } + + pub fn empty() -> Coroutine { + Coroutine { + current_stack_segment: StackSegment::new(0), + saved_context: Context::empty() + } + } + + /// Destroy coroutine and try to reuse std::stack segment. + pub fn recycle(self, stack_pool: &mut StackPool) { + let Coroutine { current_stack_segment, .. } = self; + stack_pool.give_segment(current_stack_segment); + } +} diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs new file mode 100644 index 00000000000..193b64ff7e5 --- /dev/null +++ b/src/libgreen/lib.rs @@ -0,0 +1,351 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The "green scheduling" library +//! +//! This library provides M:N threading for rust programs. Internally this has +//! the implementation of a green scheduler along with context switching and a +//! stack-allocation strategy. +//! +//! This can be optionally linked in to rust programs in order to provide M:N +//! functionality inside of 1:1 programs. 
+ +#[link(name = "green", + package_id = "green", + vers = "0.9-pre", + uuid = "20c38f8c-bfea-83ed-a068-9dc05277be26", + url = "https://github.com/mozilla/rust/tree/master/src/libgreen")]; + +#[license = "MIT/ASL2"]; +#[crate_type = "rlib"]; +#[crate_type = "dylib"]; + +// NB this does *not* include globs, please keep it that way. +#[feature(macro_rules)]; + +use std::cast; +use std::os; +use std::rt::thread::Thread; +use std::rt; +use std::rt::crate_map; +use std::rt::task::Task; +use std::rt::rtio; +use std::sync::deque; +use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT}; +use std::task::TaskResult; +use std::vec; +use std::util; + +use sched::{Shutdown, Scheduler, SchedHandle}; +use sleeper_list::SleeperList; +use task::{GreenTask, HomeSched}; + +mod macros; + +pub mod basic; +pub mod context; +pub mod coroutine; +pub mod sched; +pub mod sleeper_list; +pub mod stack; +pub mod task; + +#[cfg(stage0)] +#[lang = "start"] +pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { + do start(argc, argv) { + let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; + main(); + } +} + +/// Set up a default runtime configuration, given compiler-supplied arguments. +/// +/// This function will block the current thread of execution until the entire +/// pool of M:N schedulers have exited. +/// +/// # Arguments +/// +/// * `argc` & `argv` - The argument vector. On Unix this information is used +/// by os::args. +/// * `main` - The initial procedure to run inside of the M:N scheduling pool. +/// Once this procedure exits, the scheduling pool will begin to shut +/// down. The entire pool (and this function) will only return once +/// all child tasks have finished executing. +/// +/// # Return value +/// +/// The return value is used as the process return code. 0 on success, 101 on +/// error. 
+pub fn start(argc: int, argv: **u8, main: proc()) -> int { + rt::init(argc, argv); + let exit_code = run(main); + // unsafe is ok b/c we're sure that the runtime is gone + unsafe { rt::cleanup() } + exit_code +} + +/// Execute the main function in a pool of M:N schedulers. +/// +/// Configures the runtime according to the environment, by default +/// using a task scheduler with the same number of threads as cores. +/// Returns a process exit code. +/// +/// This function will not return until all schedulers in the associated pool +/// have returned. +pub fn run(main: proc()) -> int { + let config = Config { + shutdown_after_main_exits: true, + ..Config::new() + }; + Pool::spawn(config, main).wait(); + os::get_exit_status() +} + +/// Configuration of how an M:N pool of schedulers is spawned. +pub struct Config { + /// If this flag is set, then when schedulers are spawned via the `start` + /// and `run` functions the thread invoking `start` and `run` will have a + /// scheduler spawned on it. This scheduler will be "special" in that the + /// main task will be pinned to the scheduler and it will not participate in + /// work stealing. + /// + /// If the `spawn` function is used to create a pool of schedulers, then + /// this option has no effect. + use_main_thread: bool, + + /// The number of schedulers (OS threads) to spawn into this M:N pool. + threads: uint, + + /// When the main function exits, this flag will dictate whether a shutdown + /// is requested of all schedulers. If this flag is `true`, this means that + /// schedulers will shut down as soon as possible after the main task exits + /// (but some may stay alive longer for things like I/O or other tasks). + /// + /// If this flag is `false`, then no action is taken when the `main` task + /// exits. The scheduler pool is then shut down via the `wait()` function. 
+    shutdown_after_main_exits: bool,
+}
+
+impl Config {
+    /// Returns the default configuration, as determined by the environment
+    /// variables of this process.
+    pub fn new() -> Config {
+        Config {
+            use_main_thread: false,
+            threads: rt::default_sched_threads(),
+            shutdown_after_main_exits: false,
+        }
+    }
+}
+
+/// A structure representing a handle to a pool of schedulers. This handle is
+/// used to keep the pool alive and also reap the status from the pool.
+pub struct Pool {
+    priv threads: ~[Thread<()>],
+    priv handles: Option<~[SchedHandle]>,
+}
+
+impl Pool {
+    /// Execute the main function in a pool of M:N schedulers.
+    ///
+    /// This will configure the pool according to the `config` parameter, and
+    /// initially run `main` inside the pool of schedulers.
+    pub fn spawn(config: Config, main: proc()) -> Pool {
+        static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT;
+
+        let Config {
+            threads: nscheds,
+            use_main_thread: use_main_sched,
+            shutdown_after_main_exits
+        } = config;
+
+        let mut main = Some(main);
+        let pool_id = unsafe { POOL_ID.fetch_add(1, SeqCst) };
+
+        // The shared list of sleeping schedulers.
+        let sleepers = SleeperList::new();
+
+        // Create a work queue for each scheduler, ntimes. Create an extra
+        // for the main thread if that flag is set. We won't steal from it.
+        let mut pool = deque::BufferPool::new();
+        let arr = vec::from_fn(nscheds, |_| pool.deque());
+        let (workers, stealers) = vec::unzip(arr.move_iter());
+
+        // The schedulers.
+        let mut scheds = ~[];
+        // Handles to the schedulers. When the main task ends these will be
+        // sent the Shutdown message to terminate the schedulers.
+        let mut handles = ~[];
+
+        for worker in workers.move_iter() {
+            rtdebug!("inserting a regular scheduler");
+
+            // Every scheduler is driven by an I/O event loop.
+ let loop_ = new_event_loop(); + let mut sched = ~Scheduler::new(pool_id, + loop_, + worker, + stealers.clone(), + sleepers.clone()); + let handle = sched.make_handle(); + + scheds.push(sched); + handles.push(handle); + } + + // If we need a main-thread task then create a main thread scheduler + // that will reject any task that isn't pinned to it + let main_sched = if use_main_sched { + + // Create a friend handle. + let mut friend_sched = scheds.pop(); + let friend_handle = friend_sched.make_handle(); + scheds.push(friend_sched); + + // This scheduler needs a queue that isn't part of the stealee + // set. + let (worker, _) = pool.deque(); + + let main_loop = new_event_loop(); + let mut main_sched = ~Scheduler::new_special(pool_id, + main_loop, + worker, + stealers.clone(), + sleepers.clone(), + false, + Some(friend_handle)); + let mut main_handle = main_sched.make_handle(); + // Allow the scheduler to exit when the main task exits. + // Note: sending the shutdown message also prevents the scheduler + // from pushing itself to the sleeper list, which is used for + // waking up schedulers for work stealing; since this is a + // non-work-stealing scheduler it should not be adding itself + // to the list. + main_handle.send(Shutdown); + Some(main_sched) + } else { + None + }; + + // The pool of schedulers that will be returned from this function + let mut pool = Pool { threads: ~[], handles: None }; + + // When the main task exits, after all the tasks in the main + // task tree, shut down the schedulers and set the exit code. 
+ let mut on_exit = if shutdown_after_main_exits { + let handles = handles; + Some(proc(exit_success: TaskResult) { + let mut handles = handles; + for handle in handles.mut_iter() { + handle.send(Shutdown); + } + if exit_success.is_err() { + os::set_exit_status(rt::DEFAULT_ERROR_CODE); + } + }) + } else { + pool.handles = Some(handles); + None + }; + + if !use_main_sched { + + // In the case where we do not use a main_thread scheduler we + // run the main task in one of our threads. + + let mut main = GreenTask::new(&mut scheds[0].stack_pool, None, + main.take_unwrap()); + let mut main_task = ~Task::new(); + main_task.name = Some(SendStrStatic("
")); + main_task.death.on_exit = on_exit.take(); + main.put_task(main_task); + + let sched = scheds.pop(); + let main = main; + let thread = do Thread::start { + sched.bootstrap(main); + }; + pool.threads.push(thread); + } + + // Run each remaining scheduler in a thread. + for sched in scheds.move_rev_iter() { + rtdebug!("creating regular schedulers"); + let thread = do Thread::start { + let mut sched = sched; + let mut task = do GreenTask::new(&mut sched.stack_pool, None) { + rtdebug!("boostraping a non-primary scheduler"); + }; + task.put_task(~Task::new()); + sched.bootstrap(task); + }; + pool.threads.push(thread); + } + + // If we do have a main thread scheduler, run it now. + + if use_main_sched { + rtdebug!("about to create the main scheduler task"); + + let mut main_sched = main_sched.unwrap(); + + let home = HomeSched(main_sched.make_handle()); + let mut main = GreenTask::new_homed(&mut main_sched.stack_pool, None, + home, main.take_unwrap()); + let mut main_task = ~Task::new(); + main_task.name = Some(SendStrStatic("
")); + main_task.death.on_exit = on_exit.take(); + main.put_task(main_task); + rtdebug!("bootstrapping main_task"); + + main_sched.bootstrap(main); + } + + return pool; + } + + /// Waits for the pool of schedulers to exit. If the pool was spawned to + /// shutdown after the main task exits, this will simply wait for all the + /// scheudlers to exit. If the pool was not spawned like that, this function + /// will trigger shutdown of all the active schedulers. The schedulers will + /// exit once all tasks in this pool of schedulers has exited. + pub fn wait(&mut self) { + match self.handles.take() { + Some(mut handles) => { + for handle in handles.mut_iter() { + handle.send(Shutdown); + } + } + None => {} + } + + for thread in util::replace(&mut self.threads, ~[]).move_iter() { + thread.join(); + } + } +} + +fn new_event_loop() -> ~rtio::EventLoop { + match crate_map::get_crate_map() { + None => {} + Some(map) => { + match map.event_loop_factory { + None => {} + Some(factory) => return factory() + } + } + } + + // If the crate map didn't specify a factory to create an event loop, then + // instead just use a basic event loop missing all I/O services to at least + // get the scheduler running. + return basic::event_loop(); +} diff --git a/src/libgreen/macros.rs b/src/libgreen/macros.rs new file mode 100644 index 00000000000..ad0854e2b1e --- /dev/null +++ b/src/libgreen/macros.rs @@ -0,0 +1,130 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// XXX: this file probably shouldn't exist + +#[macro_escape]; + +use std::fmt; +use std::libc; + +// Indicates whether we should perform expensive sanity checks, including rtassert! 
+// XXX: Once the runtime matures remove the `true` below to turn off rtassert, etc. +pub static ENFORCE_SANITY: bool = true || !cfg!(rtopt) || cfg!(rtdebug) || cfg!(rtassert); + +macro_rules! rterrln ( + ($($arg:tt)*) => ( { + format_args!(::macros::dumb_println, $($arg)*) + } ) +) + +// Some basic logging. Enabled by passing `--cfg rtdebug` to the libstd build. +macro_rules! rtdebug ( + ($($arg:tt)*) => ( { + if cfg!(rtdebug) { + rterrln!($($arg)*) + } + }) +) + +macro_rules! rtassert ( + ( $arg:expr ) => ( { + if ::macros::ENFORCE_SANITY { + if !$arg { + rtabort!(" assertion failed: {}", stringify!($arg)); + } + } + } ) +) + + +macro_rules! rtabort ( + ($($arg:tt)*) => ( { + ::macros::abort(format!($($arg)*)); + } ) +) + +pub fn dumb_println(args: &fmt::Arguments) { + use std::io; + use std::libc; + use std::vec; + + struct Stderr; + impl io::Writer for Stderr { + fn write(&mut self, data: &[u8]) { + unsafe { + libc::write(libc::STDERR_FILENO, + vec::raw::to_ptr(data) as *libc::c_void, + data.len() as libc::size_t); + } + } + } + let mut w = Stderr; + fmt::writeln(&mut w as &mut io::Writer, args); +} + +pub fn abort(msg: &str) -> ! { + let msg = if !msg.is_empty() { msg } else { "aborted" }; + let hash = msg.chars().fold(0, |accum, val| accum + (val as uint) ); + let quote = match hash % 10 { + 0 => " +It was from the artists and poets that the pertinent answers came, and I +know that panic would have broken loose had they been able to compare notes. +As it was, lacking their original letters, I half suspected the compiler of +having asked leading questions, or of having edited the correspondence in +corroboration of what he had latently resolved to see.", + 1 => " +There are not many persons who know what wonders are opened to them in the +stories and visions of their youth; for when as children we listen and dream, +we think but half-formed thoughts, and when as men we try to remember, we are +dulled and prosaic with the poison of life. 
But some of us awake in the night +with strange phantasms of enchanted hills and gardens, of fountains that sing +in the sun, of golden cliffs overhanging murmuring seas, of plains that stretch +down to sleeping cities of bronze and stone, and of shadowy companies of heroes +that ride caparisoned white horses along the edges of thick forests; and then +we know that we have looked back through the ivory gates into that world of +wonder which was ours before we were wise and unhappy.", + 2 => " +Instead of the poems I had hoped for, there came only a shuddering blackness +and ineffable loneliness; and I saw at last a fearful truth which no one had +ever dared to breathe before — the unwhisperable secret of secrets — The fact +that this city of stone and stridor is not a sentient perpetuation of Old New +York as London is of Old London and Paris of Old Paris, but that it is in fact +quite dead, its sprawling body imperfectly embalmed and infested with queer +animate things which have nothing to do with it as it was in life.", + 3 => " +The ocean ate the last of the land and poured into the smoking gulf, thereby +giving up all it had ever conquered. From the new-flooded lands it flowed +again, uncovering death and decay; and from its ancient and immemorial bed it +trickled loathsomely, uncovering nighted secrets of the years when Time was +young and the gods unborn. Above the waves rose weedy remembered spires. The +moon laid pale lilies of light on dead London, and Paris stood up from its damp +grave to be sanctified with star-dust. Then rose spires and monoliths that were +weedy but not remembered; terrible spires and monoliths of lands that men never +knew were lands...", + 4 => " +There was a night when winds from unknown spaces whirled us irresistibly into +limitless vacuum beyond all thought and entity. 
Perceptions of the most +maddeningly untransmissible sort thronged upon us; perceptions of infinity +which at the time convulsed us with joy, yet which are now partly lost to my +memory and partly incapable of presentation to others.", + _ => "You've met with a terrible fate, haven't you?" + }; + rterrln!("{}", ""); + rterrln!("{}", quote); + rterrln!("{}", ""); + rterrln!("fatal runtime error: {}", msg); + + abort(); + + fn abort() -> ! { + unsafe { libc::abort() } + } +} diff --git a/src/libgreen/sched.rs b/src/libgreen/sched.rs new file mode 100644 index 00000000000..b0a49f2450a --- /dev/null +++ b/src/libgreen/sched.rs @@ -0,0 +1,1404 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cast; +use std::rand::{XorShiftRng, Rng, Rand}; +use std::rt::local::Local; +use std::rt::rtio::{RemoteCallback, PausibleIdleCallback, Callback, EventLoop}; +use std::rt::task::BlockedTask; +use std::rt::task::Task; +use std::sync::deque; +use std::unstable::mutex::Mutex; +use std::unstable::raw; +use mpsc = std::sync::mpsc_queue; + +use context::Context; +use coroutine::Coroutine; +use sleeper_list::SleeperList; +use stack::StackPool; +use task::{TypeSched, GreenTask, HomeSched, AnySched}; + +/// A scheduler is responsible for coordinating the execution of Tasks +/// on a single thread. The scheduler runs inside a slightly modified +/// Rust Task. When not running this task is stored in the scheduler +/// struct. The scheduler struct acts like a baton, all scheduling +/// actions are transfers of the baton. +/// +/// XXX: This creates too many callbacks to run_sched_once, resulting +/// in too much allocation and too many events. 
+pub struct Scheduler { + /// ID number of the pool that this scheduler is a member of. When + /// reawakening green tasks, this is used to ensure that tasks aren't + /// reawoken on the wrong pool of schedulers. + pool_id: uint, + /// There are N work queues, one per scheduler. + work_queue: deque::Worker<~GreenTask>, + /// Work queues for the other schedulers. These are created by + /// cloning the core work queues. + work_queues: ~[deque::Stealer<~GreenTask>], + /// The queue of incoming messages from other schedulers. + /// These are enqueued by SchedHandles after which a remote callback + /// is triggered to handle the message. + message_queue: mpsc::Consumer, + /// Producer used to clone sched handles from + message_producer: mpsc::Producer, + /// A shared list of sleeping schedulers. We'll use this to wake + /// up schedulers when pushing work onto the work queue. + sleeper_list: SleeperList, + /// Indicates that we have previously pushed a handle onto the + /// SleeperList but have not yet received the Wake message. + /// Being `true` does not necessarily mean that the scheduler is + /// not active since there are multiple event sources that may + /// wake the scheduler. It just prevents the scheduler from pushing + /// multiple handles onto the sleeper list. + sleepy: bool, + /// A flag to indicate we've received the shutdown message and should + /// no longer try to go to sleep, but exit instead. + no_sleep: bool, + stack_pool: StackPool, + /// The scheduler runs on a special task. When it is not running + /// it is stored here instead of the work queue. + sched_task: Option<~GreenTask>, + /// An action performed after a context switch on behalf of the + /// code running before the context switch + cleanup_job: Option, + /// If the scheduler shouldn't run some tasks, a friend to send + /// them to. + friend_handle: Option, + /// Should this scheduler run any task, or only pinned tasks? 
+ run_anything: bool, + /// A fast XorShift rng for scheduler use + rng: XorShiftRng, + /// A togglable idle callback + idle_callback: Option<~PausableIdleCallback>, + /// A countdown that starts at a random value and is decremented + /// every time a yield check is performed. When it hits 0 a task + /// will yield. + yield_check_count: uint, + /// A flag to tell the scheduler loop it needs to do some stealing + /// in order to introduce randomness as part of a yield + steal_for_yield: bool, + + // n.b. currently destructors of an object are run in top-to-bottom in order + // of field declaration. Due to its nature, the pausable idle callback + // must have some sort of handle to the event loop, so it needs to get + // destroyed before the event loop itself. For this reason, we destroy + // the event loop last to ensure that any unsafe references to it are + // destroyed before it's actually destroyed. + + /// The event loop used to drive the scheduler and perform I/O + event_loop: ~EventLoop, +} + +/// An indication of how hard to work on a given operation, the difference +/// mainly being whether memory is synchronized or not +#[deriving(Eq)] +enum EffortLevel { + DontTryTooHard, + GiveItYourBest +} + +static MAX_YIELD_CHECKS: uint = 20000; + +fn reset_yield_check(rng: &mut XorShiftRng) -> uint { + let r: uint = Rand::rand(rng); + r % MAX_YIELD_CHECKS + 1 +} + +impl Scheduler { + + // * Initialization Functions + + pub fn new(pool_id: uint, + event_loop: ~EventLoop, + work_queue: deque::Worker<~GreenTask>, + work_queues: ~[deque::Stealer<~GreenTask>], + sleeper_list: SleeperList) + -> Scheduler { + + Scheduler::new_special(pool_id, event_loop, work_queue, work_queues, + sleeper_list, true, None) + + } + + pub fn new_special(pool_id: uint, + event_loop: ~EventLoop, + work_queue: deque::Worker<~GreenTask>, + work_queues: ~[deque::Stealer<~GreenTask>], + sleeper_list: SleeperList, + run_anything: bool, + friend: Option) + -> Scheduler { + + let (consumer, producer) 
= mpsc::queue(()); + let mut sched = Scheduler { + pool_id: pool_id, + sleeper_list: sleeper_list, + message_queue: consumer, + message_producer: producer, + sleepy: false, + no_sleep: false, + event_loop: event_loop, + work_queue: work_queue, + work_queues: work_queues, + stack_pool: StackPool::new(), + sched_task: None, + cleanup_job: None, + run_anything: run_anything, + friend_handle: friend, + rng: new_sched_rng(), + idle_callback: None, + yield_check_count: 0, + steal_for_yield: false + }; + + sched.yield_check_count = reset_yield_check(&mut sched.rng); + + return sched; + } + + // XXX: This may eventually need to be refactored so that + // the scheduler itself doesn't have to call event_loop.run. + // That will be important for embedding the runtime into external + // event loops. + + // Take a main task to run, and a scheduler to run it in. Create a + // scheduler task and bootstrap into it. + pub fn bootstrap(mut ~self, task: ~GreenTask) { + + // Build an Idle callback. + let cb = ~SchedRunner as ~Callback; + self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb)); + + // Create a task for the scheduler with an empty context. + let mut sched_task = GreenTask::new_typed(Some(Coroutine::empty()), + TypeSched); + sched_task.put_task(~Task::new()); + + // Before starting our first task, make sure the idle callback + // is active. As we do not start in the sleep state this is + // important. + self.idle_callback.get_mut_ref().resume(); + + // Now, as far as all the scheduler state is concerned, we are inside + // the "scheduler" context. So we can act like the scheduler and resume + // the provided task. Let it think that the currently running task is + // actually the sched_task so it knows where to squirrel it away. + let mut sched_task = self.resume_task_immediately(sched_task, task); + + // Now we are back in the scheduler context, having + // successfully run the input task. Start by running the + // scheduler. 
Grab it out of TLS - performing the scheduler + // action will have given it away. + let sched = sched_task.sched.take_unwrap(); + rtdebug!("starting scheduler {}", sched.sched_id()); + let mut sched_task = sched.run(sched_task); + + // Close the idle callback. + let mut sched = sched_task.sched.take_unwrap(); + sched.idle_callback.take(); + // Make one go through the loop to run the close callback. + let mut stask = sched.run(sched_task); + + // Now that we are done with the scheduler, clean up the + // scheduler task. Do so by removing it from TLS and manually + // cleaning up the memory it uses. As we didn't actually call + // task.run() on the scheduler task we never get through all + // the cleanup code it runs. + rtdebug!("stopping scheduler {}", stask.sched.get_ref().sched_id()); + + // Should not have any messages + let message = stask.sched.get_mut_ref().message_queue.pop(); + rtassert!(match message { mpsc::Empty => true, _ => false }); + + stask.task.get_mut_ref().destroyed = true; + } + + // This does not return a scheduler, as the scheduler is placed + // inside the task. + pub fn run(mut ~self, stask: ~GreenTask) -> ~GreenTask { + + // This is unsafe because we need to place the scheduler, with + // the event_loop inside, inside our task. But we still need a + // mutable reference to the event_loop to give it the "run" + // command. + unsafe { + let event_loop: *mut ~EventLoop = &mut self.event_loop; + // Our scheduler must be in the task before the event loop + // is started. + stask.put_with_sched(self); + (*event_loop).run(); + } + + // This is a serious code smell, but this function could be done away + // with if necessary. The ownership of `stask` was transferred into + // local storage just before the event loop ran, so it is possible to + // transmute `stask` as a uint across the running of the event loop to + // re-acquire ownership here. 
+ // + // This would involve removing the Task from TLS, removing the runtime, + // forgetting the runtime, and then putting the task into `stask`. For + // now, because we have `GreenTask::convert`, I chose to take this + // method for cleanliness. This function is *not* a fundamental reason + // why this function should exist. + GreenTask::convert(Local::take()) + } + + // * Execution Functions - Core Loop Logic + + // The model for this function is that you continue through it + // until you either use the scheduler while performing a schedule + // action, in which case you give it away and return early, or + // you reach the end and sleep. In the case that a scheduler + // action is performed the loop is evented such that this function + // is called again. + fn run_sched_once(mut ~self, stask: ~GreenTask) { + // Make sure that we're not lying in that the `stask` argument is indeed + // the scheduler task for this scheduler. + assert!(self.sched_task.is_none()); + + // Assume that we need to continue idling unless we reach the + // end of this function without performing an action. + self.idle_callback.get_mut_ref().resume(); + + // First we check for scheduler messages, these are higher + // priority than regular tasks. + let (sched, stask) = + match self.interpret_message_queue(stask, DontTryTooHard) { + Some(pair) => pair, + None => return + }; + + // This helper will use a randomized work-stealing algorithm + // to find work. + let (sched, stask) = match sched.do_work(stask) { + Some(pair) => pair, + None => return + }; + + // Now, before sleeping we need to find out if there really + // were any messages. Give it your best! + let (mut sched, stask) = + match sched.interpret_message_queue(stask, GiveItYourBest) { + Some(pair) => pair, + None => return + }; + + // If we got here then there was no work to do. + // Generate a SchedHandle and push it to the sleeper list so + // somebody can wake us up later. 
+ if !sched.sleepy && !sched.no_sleep { + rtdebug!("scheduler has no work to do, going to sleep"); + sched.sleepy = true; + let handle = sched.make_handle(); + sched.sleeper_list.push(handle); + // Since we are sleeping, deactivate the idle callback. + sched.idle_callback.get_mut_ref().pause(); + } else { + rtdebug!("not sleeping, already doing so or no_sleep set"); + // We may not be sleeping, but we still need to deactivate + // the idle callback. + sched.idle_callback.get_mut_ref().pause(); + } + + // Finished a cycle without using the Scheduler. Place it back + // in TLS. + stask.put_with_sched(sched); + } + + // This function returns None if the scheduler is "used", or it + // returns the still-available scheduler. At this point all + // message-handling will count as a turn of work, and as a result + // return None. + fn interpret_message_queue(mut ~self, stask: ~GreenTask, + effort: EffortLevel) + -> Option<(~Scheduler, ~GreenTask)> + { + + let msg = if effort == DontTryTooHard { + self.message_queue.casual_pop() + } else { + // When popping our message queue, we could see an "inconsistent" + // state which means that we *should* be able to pop data, but we + // are unable to at this time. Our options are: + // + // 1. Spin waiting for data + // 2. Ignore this and pretend we didn't find a message + // + // If we choose route 1, then if the pusher in question is currently + // pre-empted, we're going to take up our entire time slice just + // spinning on this queue. If we choose route 2, then the pusher in + // question is still guaranteed to make a send() on its async + // handle, so we will guaranteed wake up and see its message at some + // point. + // + // I have chosen to take route #2. 
+ match self.message_queue.pop() { + mpsc::Data(t) => Some(t), + mpsc::Empty | mpsc::Inconsistent => None + } + }; + + match msg { + Some(PinnedTask(task)) => { + let mut task = task; + task.give_home(HomeSched(self.make_handle())); + self.resume_task_immediately(stask, task).put(); + return None; + } + Some(TaskFromFriend(task)) => { + rtdebug!("got a task from a friend. lovely!"); + self.process_task(stask, task, + Scheduler::resume_task_immediately_cl); + return None; + } + Some(RunOnce(task)) => { + // bypass the process_task logic to force running this task once + // on this home scheduler. This is often used for I/O (homing). + self.resume_task_immediately(stask, task).put(); + return None; + } + Some(Wake) => { + self.sleepy = false; + stask.put_with_sched(self); + return None; + } + Some(Shutdown) => { + rtdebug!("shutting down"); + if self.sleepy { + // There may be an outstanding handle on the + // sleeper list. Pop them all to make sure that's + // not the case. + loop { + match self.sleeper_list.pop() { + Some(handle) => { + let mut handle = handle; + handle.send(Wake); + } + None => break + } + } + } + // No more sleeping. After there are no outstanding + // event loop references we will shut down. + self.no_sleep = true; + self.sleepy = false; + stask.put_with_sched(self); + return None; + } + None => { + return Some((self, stask)); + } + } + } + + fn do_work(mut ~self, stask: ~GreenTask) -> Option<(~Scheduler, ~GreenTask)> { + rtdebug!("scheduler calling do work"); + match self.find_work() { + Some(task) => { + rtdebug!("found some work! running the task"); + self.process_task(stask, task, + Scheduler::resume_task_immediately_cl); + return None; + } + None => { + rtdebug!("no work was found, returning the scheduler struct"); + return Some((self, stask)); + } + } + } + + // Workstealing: In this iteration of the runtime each scheduler + // thread has a distinct work queue. 
When no work is available + // locally, make a few attempts to steal work from the queues of + // other scheduler threads. If a few steals fail we end up in the + // old "no work" path which is fine. + + // First step in the process is to find a task. This function does + // that by first checking the local queue, and if there is no work + // there, trying to steal from the remote work queues. + fn find_work(&mut self) -> Option<~GreenTask> { + rtdebug!("scheduler looking for work"); + if !self.steal_for_yield { + match self.work_queue.pop() { + Some(task) => { + rtdebug!("found a task locally"); + return Some(task) + } + None => { + rtdebug!("scheduler trying to steal"); + return self.try_steals(); + } + } + } else { + // During execution of the last task, it performed a 'yield', + // so we're doing some work stealing in order to introduce some + // scheduling randomness. Otherwise we would just end up popping + // that same task again. This is pretty lame and is to work around + // the problem that work stealing is not designed for 'non-strict' + // (non-fork-join) task parallelism. + self.steal_for_yield = false; + match self.try_steals() { + Some(task) => { + rtdebug!("stole a task after yielding"); + return Some(task); + } + None => { + rtdebug!("did not steal a task after yielding"); + // Back to business + return self.find_work(); + } + } + } + } + + // Try stealing from all queues the scheduler knows about. This + // naive implementation can steal from our own queue or from other + // special schedulers. 
+ fn try_steals(&mut self) -> Option<~GreenTask> { + let work_queues = &mut self.work_queues; + let len = work_queues.len(); + let start_index = self.rng.gen_range(0, len); + for index in range(0, len).map(|i| (i + start_index) % len) { + match work_queues[index].steal() { + deque::Data(task) => { + rtdebug!("found task by stealing"); + return Some(task) + } + _ => () + } + }; + rtdebug!("giving up on stealing"); + return None; + } + + // * Task Routing Functions - Make sure tasks send up in the right + // place. + + fn process_task(mut ~self, cur: ~GreenTask, + mut next: ~GreenTask, schedule_fn: SchedulingFn) { + rtdebug!("processing a task"); + + match next.take_unwrap_home() { + HomeSched(home_handle) => { + if home_handle.sched_id != self.sched_id() { + rtdebug!("sending task home"); + next.give_home(HomeSched(home_handle)); + Scheduler::send_task_home(next); + cur.put_with_sched(self); + } else { + rtdebug!("running task here"); + next.give_home(HomeSched(home_handle)); + schedule_fn(self, cur, next); + } + } + AnySched if self.run_anything => { + rtdebug!("running anysched task here"); + next.give_home(AnySched); + schedule_fn(self, cur, next); + } + AnySched => { + rtdebug!("sending task to friend"); + next.give_home(AnySched); + self.send_to_friend(next); + cur.put_with_sched(self); + } + } + } + + fn send_task_home(task: ~GreenTask) { + let mut task = task; + match task.take_unwrap_home() { + HomeSched(mut home_handle) => home_handle.send(PinnedTask(task)), + AnySched => rtabort!("error: cannot send anysched task home"), + } + } + + /// Take a non-homed task we aren't allowed to run here and send + /// it to the designated friend scheduler to execute. 
+ fn send_to_friend(&mut self, task: ~GreenTask) { + rtdebug!("sending a task to friend"); + match self.friend_handle { + Some(ref mut handle) => { + handle.send(TaskFromFriend(task)); + } + None => { + rtabort!("tried to send task to a friend but scheduler has no friends"); + } + } + } + + /// Schedule a task to be executed later. + /// + /// Pushes the task onto the work stealing queue and tells the + /// event loop to run it later. Always use this instead of pushing + /// to the work queue directly. + pub fn enqueue_task(&mut self, task: ~GreenTask) { + + // We push the task onto our local queue clone. + assert!(!task.is_sched()); + self.work_queue.push(task); + self.idle_callback.get_mut_ref().resume(); + + // We've made work available. Notify a + // sleeping scheduler. + + match self.sleeper_list.casual_pop() { + Some(handle) => { + let mut handle = handle; + handle.send(Wake) + } + None => { (/* pass */) } + }; + } + + // * Core Context Switching Functions + + // The primary function for changing contexts. In the current + // design the scheduler is just a slightly modified GreenTask, so + // all context swaps are from GreenTask to GreenTask. The only difference + // between the various cases is where the inputs come from, and + // what is done with the resulting task. That is specified by the + // cleanup function f, which takes the scheduler and the + // old task as inputs. + + pub fn change_task_context(mut ~self, + current_task: ~GreenTask, + mut next_task: ~GreenTask, + f: |&mut Scheduler, ~GreenTask|) -> ~GreenTask { + let f_opaque = ClosureConverter::from_fn(f); + + let current_task_dupe = unsafe { + *cast::transmute::<&~GreenTask, &uint>(¤t_task) + }; + + // The current task is placed inside an enum with the cleanup + // function. This enum is then placed inside the scheduler. + self.cleanup_job = Some(CleanupJob::new(current_task, f_opaque)); + + // The scheduler is then placed inside the next task. 
+ next_task.sched = Some(self); + + // However we still need an internal mutable pointer to the + // original task. The strategy here was "arrange memory, then + // get pointers", so we crawl back up the chain using + // transmute to eliminate borrowck errors. + unsafe { + + let sched: &mut Scheduler = + cast::transmute_mut_region(*next_task.sched.get_mut_ref()); + + let current_task: &mut GreenTask = match sched.cleanup_job { + Some(CleanupJob { task: ref task, .. }) => { + let task_ptr: *~GreenTask = task; + cast::transmute_mut_region(*cast::transmute_mut_unsafe(task_ptr)) + } + None => { + rtabort!("no cleanup job"); + } + }; + + let (current_task_context, next_task_context) = + Scheduler::get_contexts(current_task, next_task); + + // Done with everything - put the next task in TLS. This + // works because due to transmute the borrow checker + // believes that we have no internal pointers to + // next_task. + cast::forget(next_task); + + // The raw context swap operation. The next action taken + // will be running the cleanup job from the context of the + // next task. + Context::swap(current_task_context, next_task_context); + } + + // When the context swaps back to this task we immediately + // run the cleanup job, as expected by the previously called + // swap_contexts function. + let mut current_task: ~GreenTask = unsafe { + cast::transmute(current_task_dupe) + }; + current_task.sched.get_mut_ref().run_cleanup_job(); + + // See the comments in switch_running_tasks_and_then for why a lock + // is acquired here. This is the resumption points and the "bounce" + // that it is referring to. + unsafe { + current_task.nasty_deschedule_lock.lock(); + current_task.nasty_deschedule_lock.unlock(); + } + return current_task; + } + + // Returns a mutable reference to both contexts involved in this + // swap. This is unsafe - we are getting mutable internal + // references to keep even when we don't own the tasks. 
It looks + // kinda safe because we are doing transmutes before passing in + // the arguments. + pub fn get_contexts<'a>(current_task: &mut GreenTask, next_task: &mut GreenTask) -> + (&'a mut Context, &'a mut Context) { + let current_task_context = + &mut current_task.coroutine.get_mut_ref().saved_context; + let next_task_context = + &mut next_task.coroutine.get_mut_ref().saved_context; + unsafe { + (cast::transmute_mut_region(current_task_context), + cast::transmute_mut_region(next_task_context)) + } + } + + // * Context Swapping Helpers - Here be ugliness! + + pub fn resume_task_immediately(~self, cur: ~GreenTask, + next: ~GreenTask) -> ~GreenTask { + assert!(cur.is_sched()); + self.change_task_context(cur, next, |sched, stask| { + assert!(sched.sched_task.is_none()); + sched.sched_task = Some(stask); + }) + } + + fn resume_task_immediately_cl(sched: ~Scheduler, + cur: ~GreenTask, + next: ~GreenTask) { + sched.resume_task_immediately(cur, next).put() + } + + /// Block a running task, context switch to the scheduler, then pass the + /// blocked task to a closure. + /// + /// # Safety note + /// + /// The closure here is a *stack* closure that lives in the + /// running task. It gets transmuted to the scheduler's lifetime + /// and called while the task is blocked. + /// + /// This passes a Scheduler pointer to the fn after the context switch + /// in order to prevent that fn from performing further scheduling operations. + /// Doing further scheduling could easily result in infinite recursion. + /// + /// Note that if the closure provided relinquishes ownership of the + /// BlockedTask, then it is possible for the task to resume execution before + /// the closure has finished executing. This would naturally introduce a + /// race if the closure and task shared portions of the environment. + /// + /// This situation is currently prevented, or in other words it is + /// guaranteed that this function will not return before the given closure + /// has returned. 
+ pub fn deschedule_running_task_and_then(mut ~self, + cur: ~GreenTask, + f: |&mut Scheduler, BlockedTask|) { + // Trickier - we need to get the scheduler task out of self + // and use it as the destination. + let stask = self.sched_task.take_unwrap(); + // Otherwise this is the same as below. + self.switch_running_tasks_and_then(cur, stask, f) + } + + pub fn switch_running_tasks_and_then(~self, + cur: ~GreenTask, + next: ~GreenTask, + f: |&mut Scheduler, BlockedTask|) { + // And here comes one of the sad moments in which a lock is used in a + // core portion of the rust runtime. As always, this is highly + // undesirable, so there's a good reason behind it. + // + // There is an excellent outline of the problem in issue #8132, and it's + // summarized in that `f` is executed on a sched task, but its + // environment is on the previous task. If `f` relinquishes ownership of + // the BlockedTask, then it may introduce a race where `f` is using the + // environment as well as the code after the 'deschedule' block. + // + // The solution we have chosen to adopt for now is to acquire a + // task-local lock around this block. The resumption of the task in + // context switching will bounce on the lock, thereby waiting for this + // block to finish, eliminating the race mentioned above. + // fail!("should never return!"); + // + // To actually maintain a handle to the lock, we use an unsafe pointer + // to it, but we're guaranteed that the task won't exit until we've + // unlocked the lock so there's no worry of this memory going away. 
+ let cur = self.change_task_context(cur, next, |sched, mut task| { + let lock: *mut Mutex = &mut task.nasty_deschedule_lock; + unsafe { (*lock).lock() } + f(sched, BlockedTask::block(task.swap())); + unsafe { (*lock).unlock() } + }); + cur.put(); + } + + fn switch_task(sched: ~Scheduler, cur: ~GreenTask, next: ~GreenTask) { + sched.change_task_context(cur, next, |sched, last_task| { + if last_task.is_sched() { + assert!(sched.sched_task.is_none()); + sched.sched_task = Some(last_task); + } else { + sched.enqueue_task(last_task); + } + }).put() + } + + // * Task Context Helpers + + /// Called by a running task to end execution, after which it will + /// be recycled by the scheduler for reuse in a new task. + pub fn terminate_current_task(mut ~self, cur: ~GreenTask) { + // Similar to deschedule running task and then, but cannot go through + // the task-blocking path. The task is already dying. + let stask = self.sched_task.take_unwrap(); + let _cur = self.change_task_context(cur, stask, |sched, mut dead_task| { + let coroutine = dead_task.coroutine.take_unwrap(); + coroutine.recycle(&mut sched.stack_pool); + }); + fail!("should never return!"); + } + + pub fn run_task(~self, cur: ~GreenTask, next: ~GreenTask) { + self.process_task(cur, next, Scheduler::switch_task); + } + + pub fn run_task_later(mut cur: ~GreenTask, next: ~GreenTask) { + let mut sched = cur.sched.take_unwrap(); + sched.enqueue_task(next); + cur.put_with_sched(sched); + } + + /// Yield control to the scheduler, executing another task. This is guaranteed + /// to introduce some amount of randomness to the scheduler. Currently the + /// randomness is a result of performing a round of work stealing (which + /// may end up stealing from the current scheduler). 
+ pub fn yield_now(mut ~self, cur: ~GreenTask) { + if cur.is_sched() { + assert!(self.sched_task.is_none()); + self.run_sched_once(cur); + } else { + self.yield_check_count = reset_yield_check(&mut self.rng); + // Tell the scheduler to start stealing on the next iteration + self.steal_for_yield = true; + let stask = self.sched_task.take_unwrap(); + let cur = self.change_task_context(cur, stask, |sched, task| { + sched.enqueue_task(task); + }); + cur.put() + } + } + + pub fn maybe_yield(mut ~self, cur: ~GreenTask) { + // The number of times to do the yield check before yielding, chosen + // arbitrarily. + rtassert!(self.yield_check_count > 0); + self.yield_check_count -= 1; + if self.yield_check_count == 0 { + self.yield_now(cur); + } else { + cur.put_with_sched(self); + } + } + + + // * Utility Functions + + pub fn sched_id(&self) -> uint { unsafe { cast::transmute(self) } } + + pub fn run_cleanup_job(&mut self) { + let cleanup_job = self.cleanup_job.take_unwrap(); + cleanup_job.run(self) + } + + pub fn make_handle(&mut self) -> SchedHandle { + let remote = self.event_loop.remote_callback(~SchedRunner as ~Callback); + + return SchedHandle { + remote: remote, + queue: self.message_producer.clone(), + sched_id: self.sched_id() + } + } +} + +// Supporting types + +type SchedulingFn = extern "Rust" fn (~Scheduler, ~GreenTask, ~GreenTask); + +pub enum SchedMessage { + Wake, + Shutdown, + PinnedTask(~GreenTask), + TaskFromFriend(~GreenTask), + RunOnce(~GreenTask), +} + +pub struct SchedHandle { + priv remote: ~RemoteCallback, + priv queue: mpsc::Producer, + sched_id: uint +} + +impl SchedHandle { + pub fn send(&mut self, msg: SchedMessage) { + self.queue.push(msg); + self.remote.fire(); + } +} + +struct SchedRunner; + +impl Callback for SchedRunner { + fn call(&mut self) { + // In theory, this function needs to invoke the `run_sched_once` + // function on the scheduler. Sadly, we have no context here, except for + // knowledge of the local `Task`. 
In order to avoid a call to + // `GreenTask::convert`, we just call `yield_now` and the scheduler will + // detect when a sched task performs a yield vs a green task performing + // a yield (and act accordingly). + // + // This function could be converted to `GreenTask::convert` if + // absolutely necessary, but for cleanliness it is much better to not + // use the conversion function. + let task: ~Task = Local::take(); + task.yield_now(); + } +} + +struct CleanupJob { + task: ~GreenTask, + f: UnsafeTaskReceiver +} + +impl CleanupJob { + pub fn new(task: ~GreenTask, f: UnsafeTaskReceiver) -> CleanupJob { + CleanupJob { + task: task, + f: f + } + } + + pub fn run(self, sched: &mut Scheduler) { + let CleanupJob { task: task, f: f } = self; + f.to_fn()(sched, task) + } +} + +// XXX: Some hacks to put a || closure in Scheduler without borrowck +// complaining +type UnsafeTaskReceiver = raw::Closure; +trait ClosureConverter { + fn from_fn(|&mut Scheduler, ~GreenTask|) -> Self; + fn to_fn(self) -> |&mut Scheduler, ~GreenTask|; +} +impl ClosureConverter for UnsafeTaskReceiver { + fn from_fn(f: |&mut Scheduler, ~GreenTask|) -> UnsafeTaskReceiver { + unsafe { cast::transmute(f) } + } + fn to_fn(self) -> |&mut Scheduler, ~GreenTask| { + unsafe { cast::transmute(self) } + } +} + +// On unix, we read randomness straight from /dev/urandom, but the +// default constructor of an XorShiftRng does this via io::fs, which +// relies on the scheduler existing, so we have to manually load +// randomness. Windows has its own C API for this, so we don't need to +// worry there. +#[cfg(windows)] +fn new_sched_rng() -> XorShiftRng { + XorShiftRng::new() +} +#[cfg(unix)] +fn new_sched_rng() -> XorShiftRng { + use std::libc; + use std::mem; + use std::rand::SeedableRng; + + let fd = "/dev/urandom".with_c_str(|name| { + unsafe { libc::open(name, libc::O_RDONLY, 0) } + }); + if fd == -1 { + rtabort!("could not open /dev/urandom for reading.") + } + + let mut seeds = [0u32, .. 
4]; + let size = mem::size_of_val(&seeds); + loop { + let nbytes = unsafe { + libc::read(fd, + seeds.as_mut_ptr() as *mut libc::c_void, + size as libc::size_t) + }; + rtassert!(nbytes as uint == size); + + if !seeds.iter().all(|x| *x == 0) { + break; + } + } + + unsafe {libc::close(fd);} + + SeedableRng::from_seed(seeds) +} + +#[cfg(test)] +mod test { + use borrow::to_uint; + use rt::deque::BufferPool; + use rt::basic; + use rt::sched::{Scheduler}; + use rt::task::{GreenTask, Sched}; + use rt::thread::Thread; + use rt::util; + use task::TaskResult; + use unstable::run_in_bare_thread; + + #[test] + fn trivial_run_in_newsched_task_test() { + let mut task_ran = false; + let task_ran_ptr: *mut bool = &mut task_ran; + do run_in_newsched_task || { + unsafe { *task_ran_ptr = true }; + rtdebug!("executed from the new scheduler") + } + assert!(task_ran); + } + + #[test] + fn multiple_task_test() { + let total = 10; + let mut task_run_count = 0; + let task_run_count_ptr: *mut uint = &mut task_run_count; + do run_in_newsched_task || { + for _ in range(0u, total) { + do spawntask || { + unsafe { *task_run_count_ptr = *task_run_count_ptr + 1}; + } + } + } + assert!(task_run_count == total); + } + + #[test] + fn multiple_task_nested_test() { + let mut task_run_count = 0; + let task_run_count_ptr: *mut uint = &mut task_run_count; + do run_in_newsched_task || { + do spawntask || { + unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; + do spawntask || { + unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; + do spawntask || { + unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; + } + } + } + } + assert!(task_run_count == 3); + } + + // Confirm that a sched_id actually is the uint form of the + // pointer to the scheduler struct. 
+ #[test] + fn simple_sched_id_test() { + do run_in_bare_thread { + let sched = ~new_test_uv_sched(); + assert!(to_uint(sched) == sched.sched_id()); + } + } + + // Compare two scheduler ids that are different, this should never + // fail but may catch a mistake someday. + #[test] + fn compare_sched_id_test() { + do run_in_bare_thread { + let sched_one = ~new_test_uv_sched(); + let sched_two = ~new_test_uv_sched(); + assert!(sched_one.sched_id() != sched_two.sched_id()); + } + } + + + // A very simple test that confirms that a task executing on the + // home scheduler notices that it is home. + #[test] + fn test_home_sched() { + do run_in_bare_thread { + let mut task_ran = false; + let task_ran_ptr: *mut bool = &mut task_ran; + + let mut sched = ~new_test_uv_sched(); + let sched_handle = sched.make_handle(); + + let mut task = ~do GreenTask::new_root_homed(&mut sched.stack_pool, None, + Sched(sched_handle)) { + unsafe { *task_ran_ptr = true }; + assert!(GreenTask::on_appropriate_sched()); + }; + + let on_exit: proc(TaskResult) = proc(exit_status) { + rtassert!(exit_status.is_ok()) + }; + task.death.on_exit = Some(on_exit); + + sched.bootstrap(task); + } + } + + // An advanced test that checks all four possible states that a + // (task,sched) can be in regarding homes. 
+ + #[test] + fn test_schedule_home_states() { + use rt::sleeper_list::SleeperList; + use rt::sched::Shutdown; + use borrow; + + do run_in_bare_thread { + + let sleepers = SleeperList::new(); + let mut pool = BufferPool::new(); + let (normal_worker, normal_stealer) = pool.deque(); + let (special_worker, special_stealer) = pool.deque(); + let queues = ~[normal_stealer, special_stealer]; + + // Our normal scheduler + let mut normal_sched = ~Scheduler::new( + basic::event_loop(), + normal_worker, + queues.clone(), + sleepers.clone()); + + let normal_handle = normal_sched.make_handle(); + + // Our special scheduler + let mut special_sched = ~Scheduler::new_special( + basic::event_loop(), + special_worker, + queues.clone(), + sleepers.clone(), + false, + Some(friend_handle)); + + let special_handle = special_sched.make_handle(); + + let t1_handle = special_sched.make_handle(); + let t4_handle = special_sched.make_handle(); + + // Four test tasks: + // 1) task is home on special + // 2) task not homed, sched doesn't care + // 3) task not homed, sched requeues + // 4) task not home, send home + + let task1 = ~do GreenTask::new_root_homed(&mut special_sched.stack_pool, None, + Sched(t1_handle)) || { + rtassert!(GreenTask::on_appropriate_sched()); + }; + rtdebug!("task1 id: **{}**", borrow::to_uint(task1)); + + let task2 = ~do GreenTask::new_root(&mut normal_sched.stack_pool, None) { + rtassert!(GreenTask::on_appropriate_sched()); + }; + + let task3 = ~do GreenTask::new_root(&mut normal_sched.stack_pool, None) { + rtassert!(GreenTask::on_appropriate_sched()); + }; + + let task4 = ~do GreenTask::new_root_homed(&mut special_sched.stack_pool, None, + Sched(t4_handle)) { + rtassert!(GreenTask::on_appropriate_sched()); + }; + rtdebug!("task4 id: **{}**", borrow::to_uint(task4)); + + // Signal from the special task that we are done. 
+ let (port, chan) = Chan::<()>::new(); + + let normal_task = ~do GreenTask::new_root(&mut normal_sched.stack_pool, None) { + rtdebug!("*about to submit task2*"); + Scheduler::run_task(task2); + rtdebug!("*about to submit task4*"); + Scheduler::run_task(task4); + rtdebug!("*normal_task done*"); + port.recv(); + let mut nh = normal_handle; + nh.send(Shutdown); + let mut sh = special_handle; + sh.send(Shutdown); + }; + + rtdebug!("normal task: {}", borrow::to_uint(normal_task)); + + let special_task = ~do GreenTask::new_root(&mut special_sched.stack_pool, None) { + rtdebug!("*about to submit task1*"); + Scheduler::run_task(task1); + rtdebug!("*about to submit task3*"); + Scheduler::run_task(task3); + rtdebug!("*done with special_task*"); + chan.send(()); + }; + + rtdebug!("special task: {}", borrow::to_uint(special_task)); + + let normal_sched = normal_sched; + let normal_thread = do Thread::start { + normal_sched.bootstrap(normal_task); + rtdebug!("finished with normal_thread"); + }; + + let special_sched = special_sched; + let special_thread = do Thread::start { + special_sched.bootstrap(special_task); + rtdebug!("finished with special_sched"); + }; + + normal_thread.join(); + special_thread.join(); + } + } + + #[test] + fn test_stress_schedule_task_states() { + if util::limit_thread_creation_due_to_osx_and_valgrind() { return; } + let n = stress_factor() * 120; + for _ in range(0, n as int) { + test_schedule_home_states(); + } + } + + #[test] + fn test_io_callback() { + use io::timer; + + // This is a regression test that when there are no schedulable tasks + // in the work queue, but we are performing I/O, that once we do put + // something in the work queue again the scheduler picks it up and doesn't + // exit before emptying the work queue + do run_in_uv_task { + do spawntask { + timer::sleep(10); + } + } + } + + #[test] + fn handle() { + do run_in_bare_thread { + let (port, chan) = Chan::new(); + + let thread_one = do Thread::start { + let chan = chan; + do 
run_in_newsched_task_core { + chan.send(()); + } + }; + + let thread_two = do Thread::start { + let port = port; + do run_in_newsched_task_core { + port.recv(); + } + }; + + thread_two.join(); + thread_one.join(); + } + } + + // A regression test that the final message is always handled. + // Used to deadlock because Shutdown was never recvd. + #[test] + fn no_missed_messages() { + use rt::sleeper_list::SleeperList; + use rt::stack::StackPool; + use rt::sched::{Shutdown, TaskFromFriend}; + + do run_in_bare_thread { + stress_factor().times(|| { + let sleepers = SleeperList::new(); + let mut pool = BufferPool::new(); + let (worker, stealer) = pool.deque(); + + let mut sched = ~Scheduler::new( + basic::event_loop(), + worker, + ~[stealer], + sleepers.clone()); + + let mut handle = sched.make_handle(); + + let sched = sched; + let thread = do Thread::start { + let mut sched = sched; + let bootstrap_task = + ~GreenTask::new_root(&mut sched.stack_pool, + None, + proc()()); + sched.bootstrap(bootstrap_task); + }; + + let mut stack_pool = StackPool::new(); + let task = ~GreenTask::new_root(&mut stack_pool, None, proc()()); + handle.send(TaskFromFriend(task)); + + handle.send(Shutdown); + drop(handle); + + thread.join(); + }) + } + } + + #[test] + fn multithreading() { + use num::Times; + use vec::OwnedVector; + use container::Container; + + do run_in_mt_newsched_task { + let mut ports = ~[]; + 10.times(|| { + let (port, chan) = Chan::new(); + do spawntask_later { + chan.send(()); + } + ports.push(port); + }); + + while !ports.is_empty() { + ports.pop().recv(); + } + } + } + + #[test] + fn thread_ring() { + do run_in_mt_newsched_task { + let (end_port, end_chan) = Chan::new(); + + let n_tasks = 10; + let token = 2000; + + let (mut p, ch1) = Chan::new(); + ch1.send((token, end_chan)); + let mut i = 2; + while i <= n_tasks { + let (next_p, ch) = Chan::new(); + let imm_i = i; + let imm_p = p; + do spawntask_random { + roundtrip(imm_i, n_tasks, &imm_p, &ch); + }; + p = next_p; 
+ i += 1; + } + let p = p; + do spawntask_random { + roundtrip(1, n_tasks, &p, &ch1); + } + + end_port.recv(); + } + + fn roundtrip(id: int, n_tasks: int, + p: &Port<(int, Chan<()>)>, + ch: &Chan<(int, Chan<()>)>) { + while (true) { + match p.recv() { + (1, end_chan) => { + debug!("{}\n", id); + end_chan.send(()); + return; + } + (token, end_chan) => { + debug!("thread: {} got token: {}", id, token); + ch.send((token - 1, end_chan)); + if token <= n_tasks { + return; + } + } + } + } + } + } + + #[test] + fn start_closure_dtor() { + use ops::Drop; + + // Regression test that the `start` task entrypoint can + // contain dtors that use task resources + do run_in_newsched_task { + struct S { field: () } + + impl Drop for S { + fn drop(&mut self) { + let _foo = @0; + } + } + + let s = S { field: () }; + + do spawntask { + let _ss = &s; + } + } + } + + // FIXME: #9407: xfail-test + #[ignore] + #[test] + fn dont_starve_1() { + stress_factor().times(|| { + do run_in_mt_newsched_task { + let (port, chan) = Chan::new(); + + // This task should not be able to starve the sender; + // The sender should get stolen to another thread. + do spawntask { + while port.try_recv().is_none() { } + } + + chan.send(()); + } + }) + } + + #[test] + fn dont_starve_2() { + stress_factor().times(|| { + do run_in_newsched_task { + let (port, chan) = Chan::new(); + let (_port2, chan2) = Chan::new(); + + // This task should not be able to starve the other task. + // The sends should eventually yield. + do spawntask { + while port.try_recv().is_none() { + chan2.send(()); + } + } + + chan.send(()); + } + }) + } + + // Regression test for a logic bug that would cause single-threaded schedulers + // to sleep forever after yielding and stealing another task. 
+ #[test] + fn single_threaded_yield() { + use task::{spawn, spawn_sched, SingleThreaded, deschedule}; + use num::Times; + + do spawn_sched(SingleThreaded) { + 5.times(|| { deschedule(); }) + } + do spawn { } + do spawn { } + } +} diff --git a/src/libgreen/sleeper_list.rs b/src/libgreen/sleeper_list.rs new file mode 100644 index 00000000000..5be260efdfa --- /dev/null +++ b/src/libgreen/sleeper_list.rs @@ -0,0 +1,46 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Maintains a shared list of sleeping schedulers. Schedulers +//! use this to wake each other up. + +use std::sync::mpmc_bounded_queue::Queue; + +use sched::SchedHandle; + +pub struct SleeperList { + priv q: Queue, +} + +impl SleeperList { + pub fn new() -> SleeperList { + SleeperList{q: Queue::with_capacity(8*1024)} + } + + pub fn push(&mut self, value: SchedHandle) { + assert!(self.q.push(value)) + } + + pub fn pop(&mut self) -> Option { + self.q.pop() + } + + pub fn casual_pop(&mut self) -> Option { + self.q.pop() + } +} + +impl Clone for SleeperList { + fn clone(&self) -> SleeperList { + SleeperList { + q: self.q.clone() + } + } +} diff --git a/src/libgreen/stack.rs b/src/libgreen/stack.rs new file mode 100644 index 00000000000..cf2a3d5f141 --- /dev/null +++ b/src/libgreen/stack.rs @@ -0,0 +1,75 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use std::vec; +use std::libc::{c_uint, uintptr_t}; + +pub struct StackSegment { + priv buf: ~[u8], + priv valgrind_id: c_uint +} + +impl StackSegment { + pub fn new(size: uint) -> StackSegment { + unsafe { + // Crate a block of uninitialized values + let mut stack = vec::with_capacity(size); + stack.set_len(size); + + let mut stk = StackSegment { + buf: stack, + valgrind_id: 0 + }; + + // XXX: Using the FFI to call a C macro. Slow + stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end()); + return stk; + } + } + + /// Point to the low end of the allocated stack + pub fn start(&self) -> *uint { + self.buf.as_ptr() as *uint + } + + /// Point one word beyond the high end of the allocated stack + pub fn end(&self) -> *uint { + unsafe { + self.buf.as_ptr().offset(self.buf.len() as int) as *uint + } + } +} + +impl Drop for StackSegment { + fn drop(&mut self) { + unsafe { + // XXX: Using the FFI to call a C macro. Slow + rust_valgrind_stack_deregister(self.valgrind_id); + } + } +} + +pub struct StackPool(()); + +impl StackPool { + pub fn new() -> StackPool { StackPool(()) } + + fn take_segment(&self, min_size: uint) -> StackSegment { + StackSegment::new(min_size) + } + + fn give_segment(&self, _stack: StackSegment) { + } +} + +extern { + fn rust_valgrind_stack_register(start: *uintptr_t, end: *uintptr_t) -> c_uint; + fn rust_valgrind_stack_deregister(id: c_uint); +} diff --git a/src/libgreen/task.rs b/src/libgreen/task.rs new file mode 100644 index 00000000000..72e72f2cd99 --- /dev/null +++ b/src/libgreen/task.rs @@ -0,0 +1,505 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! The Green Task implementation +//! +//! 
This module contains the glue to the libstd runtime necessary to integrate +//! M:N scheduling. This GreenTask structure is hidden as a trait object in all +//! rust tasks and virtual calls are made in order to interface with it. +//! +//! Each green task contains a scheduler if it is currently running, and it also +//! contains the rust task itself in order to juggle around ownership of the +//! values. + +use std::cast; +use std::rt::Runtime; +use std::rt::rtio; +use std::rt::local::Local; +use std::rt::task::{Task, BlockedTask}; +use std::task::TaskOpts; +use std::unstable::mutex::Mutex; + +use coroutine::Coroutine; +use sched::{Scheduler, SchedHandle, RunOnce}; +use stack::StackPool; + +/// The necessary fields needed to keep track of a green task (as opposed to a +/// 1:1 task). +pub struct GreenTask { + coroutine: Option, + handle: Option, + sched: Option<~Scheduler>, + task: Option<~Task>, + task_type: TaskType, + pool_id: uint, + + // See the comments in the scheduler about why this is necessary + nasty_deschedule_lock: Mutex, +} + +pub enum TaskType { + TypeGreen(Option), + TypeSched, +} + +pub enum Home { + AnySched, + HomeSched(SchedHandle), +} + +impl GreenTask { + pub fn new(stack_pool: &mut StackPool, + stack_size: Option, + start: proc()) -> ~GreenTask { + GreenTask::new_homed(stack_pool, stack_size, AnySched, start) + } + + pub fn new_homed(stack_pool: &mut StackPool, + stack_size: Option, + home: Home, + start: proc()) -> ~GreenTask { + let mut ops = GreenTask::new_typed(None, TypeGreen(Some(home))); + let start = GreenTask::build_start_wrapper(start, ops.as_uint()); + ops.coroutine = Some(Coroutine::new(stack_pool, stack_size, start)); + return ops; + } + + pub fn new_typed(coroutine: Option, + task_type: TaskType) -> ~GreenTask { + ~GreenTask { + pool_id: 0, + coroutine: coroutine, + task_type: task_type, + sched: None, + handle: None, + nasty_deschedule_lock: unsafe { Mutex::new() }, + task: None, + } + } + + /// Just like the 
`maybe_take_runtime` function, this function should *not* + /// exist. Usage of this function is _strongly_ discouraged. This is an + /// absolute last resort necessary for converting a libstd task to a green + /// task. + /// + /// This function will assert that the task is indeed a green task before + /// returning (and will kill the entire process if this is wrong). + pub fn convert(mut task: ~Task) -> ~GreenTask { + match task.maybe_take_runtime::() { + Some(mut green) => { + green.put_task(task); + green + } + None => rtabort!("not a green task any more?"), + } + } + + /// Builds a function which is the actual starting execution point for a + /// rust task. This function is the glue necessary to execute the libstd + /// task and then clean up the green thread after it exits. + /// + /// The second argument to this function is actually a transmuted copy of + /// the `GreenTask` pointer. Context switches in the scheduler silently + /// transfer ownership of the `GreenTask` to the other end of the context + /// switch, so because this is the first code that is running in this task, + /// it must first re-acquire ownership of the green task. + pub fn build_start_wrapper(start: proc(), ops: uint) -> proc() { + proc() { + // First code after swap to this new context. Run our + // cleanup job after we have re-acquired ownership of the green + // task. + let mut task: ~GreenTask = unsafe { GreenTask::from_uint(ops) }; + task.sched.get_mut_ref().run_cleanup_job(); + + // Convert our green task to a libstd task and then execute the code + // requeted. This is the "try/catch" block for this green task and + // is the wrapper for *all* code run in the task. + let mut start = Some(start); + let task = task.swap().run(|| start.take_unwrap()()); + + // Once the function has exited, it's time to run the termination + // routine. This means we need to context switch one more time but + // clean ourselves up on the other end. 
Since we have no way of + // preserving a handle to the GreenTask down to this point, this + // unfortunately must call `GreenTask::convert`. In order to avoid + // this we could add a `terminate` function to the `Runtime` trait + // in libstd, but that seems less appropriate since the coversion + // method exists. + GreenTask::convert(task).terminate(); + } + } + + pub fn give_home(&mut self, new_home: Home) { + match self.task_type { + TypeGreen(ref mut home) => { *home = Some(new_home); } + TypeSched => rtabort!("type error: used SchedTask as GreenTask"), + } + } + + pub fn take_unwrap_home(&mut self) -> Home { + match self.task_type { + TypeGreen(ref mut home) => home.take_unwrap(), + TypeSched => rtabort!("type error: used SchedTask as GreenTask"), + } + } + + // New utility functions for homes. + + pub fn is_home_no_tls(&self, sched: &Scheduler) -> bool { + match self.task_type { + TypeGreen(Some(AnySched)) => { false } + TypeGreen(Some(HomeSched(SchedHandle { sched_id: ref id, .. }))) => { + *id == sched.sched_id() + } + TypeGreen(None) => { rtabort!("task without home"); } + TypeSched => { + // Awe yea + rtabort!("type error: expected: TypeGreen, found: TaskSched"); + } + } + } + + pub fn homed(&self) -> bool { + match self.task_type { + TypeGreen(Some(AnySched)) => { false } + TypeGreen(Some(HomeSched(SchedHandle { .. }))) => { true } + TypeGreen(None) => { + rtabort!("task without home"); + } + TypeSched => { + rtabort!("type error: expected: TypeGreen, found: TaskSched"); + } + } + } + + pub fn is_sched(&self) -> bool { + match self.task_type { + TypeGreen(..) 
=> false, TypeSched => true, + } + } + + // Unsafe functions for transferring ownership of this GreenTask across + // context switches + + pub fn as_uint(&self) -> uint { + unsafe { cast::transmute(self) } + } + + pub unsafe fn from_uint(val: uint) -> ~GreenTask { cast::transmute(val) } + + // Runtime glue functions and helpers + + pub fn put_with_sched(mut ~self, sched: ~Scheduler) { + assert!(self.sched.is_none()); + self.sched = Some(sched); + self.put(); + } + + pub fn put_task(&mut self, task: ~Task) { + assert!(self.task.is_none()); + self.task = Some(task); + } + + pub fn swap(mut ~self) -> ~Task { + let mut task = self.task.take_unwrap(); + task.put_runtime(self as ~Runtime); + return task; + } + + pub fn put(~self) { + assert!(self.sched.is_some()); + Local::put(self.swap()); + } + + fn terminate(mut ~self) { + let sched = self.sched.take_unwrap(); + sched.terminate_current_task(self); + } + + // This function is used to remotely wakeup this green task back on to its + // original pool of schedulers. In order to do so, each tasks arranges a + // SchedHandle upon descheduling to be available for sending itself back to + // the original pool. + // + // Note that there is an interesting transfer of ownership going on here. We + // must relinquish ownership of the green task, but then also send the task + // over the handle back to the original scheduler. In order to safely do + // this, we leverage the already-present "nasty descheduling lock". The + // reason for doing this is that each task will bounce on this lock after + // resuming after a context switch. By holding the lock over the enqueueing + // of the task, we're guaranteed that the SchedHandle's memory will be valid + // for this entire function. + // + // An alternative would include having incredibly cheaply cloneable handles, + // but right now a SchedHandle is something like 6 allocations, so it is + // *not* a cheap operation to clone a handle. 
Until the day comes that we + // need to optimize this, a lock should do just fine (it's completely + // uncontended except for when the task is rescheduled). + fn reawaken_remotely(mut ~self) { + unsafe { + let mtx = &mut self.nasty_deschedule_lock as *mut Mutex; + let handle = self.handle.get_mut_ref() as *mut SchedHandle; + (*mtx).lock(); + (*handle).send(RunOnce(self)); + (*mtx).unlock(); + } + } +} + +impl Runtime for GreenTask { + fn yield_now(mut ~self, cur_task: ~Task) { + self.put_task(cur_task); + let sched = self.sched.take_unwrap(); + sched.yield_now(self); + } + + fn maybe_yield(mut ~self, cur_task: ~Task) { + self.put_task(cur_task); + let sched = self.sched.take_unwrap(); + sched.maybe_yield(self); + } + + fn deschedule(mut ~self, times: uint, cur_task: ~Task, + f: |BlockedTask| -> Result<(), BlockedTask>) { + self.put_task(cur_task); + let mut sched = self.sched.take_unwrap(); + + // In order for this task to be reawoken in all possible contexts, we + // may need a handle back in to the current scheduler. When we're woken + // up in anything other than the local scheduler pool, this handle is + // used to send this task back into the scheduler pool. + if self.handle.is_none() { + self.handle = Some(sched.make_handle()); + self.pool_id = sched.pool_id; + } + + // This code is pretty standard, except for the usage of + // `GreenTask::convert`. Right now if we use `reawaken` directly it will + // expect for there to be a task in local TLS, but that is not true for + // this deschedule block (because the scheduler must retain ownership of + // the task while the cleanup job is running). In order to get around + // this for now, we invoke the scheduler directly with the converted + // Task => GreenTask structure. 
+ if times == 1 { + sched.deschedule_running_task_and_then(self, |sched, task| { + match f(task) { + Ok(()) => {} + Err(t) => { + t.wake().map(|t| { + sched.enqueue_task(GreenTask::convert(t)) + }); + } + } + }); + } else { + sched.deschedule_running_task_and_then(self, |sched, task| { + for task in task.make_selectable(times) { + match f(task) { + Ok(()) => {}, + Err(task) => { + task.wake().map(|t| { + sched.enqueue_task(GreenTask::convert(t)) + }); + break + } + } + } + }); + } + } + + fn reawaken(mut ~self, to_wake: ~Task, can_resched: bool) { + self.put_task(to_wake); + assert!(self.sched.is_none()); + + // Waking up a green thread is a bit of a tricky situation. We have no + // guarantee about where the current task is running. The options we + // have for where this current task is running are: + // + // 1. Our original scheduler pool + // 2. Some other scheduler pool + // 3. Something that isn't a scheduler pool + // + // In order to figure out what case we're in, this is the reason that + // the `maybe_take_runtime` function exists. Using this function we can + // dynamically check to see which of these cases is the current + // situation and then dispatch accordingly. + // + // In case 1, we just use the local scheduler to resume ourselves + // immediately (if a rescheduling is possible). + // + // In case 2 and 3, we need to remotely reawaken ourself in order to be + // transplanted back to the correct scheduler pool. + let mut running_task: ~Task = Local::take(); + match running_task.maybe_take_runtime::() { + Some(mut running_green_task) => { + let mut sched = running_green_task.sched.take_unwrap(); + if sched.pool_id == self.pool_id { + running_green_task.put_task(running_task); + if can_resched { + sched.run_task(running_green_task, self); + } else { + sched.enqueue_task(self); + running_green_task.put_with_sched(sched); + } + } else { + self.reawaken_remotely(); + + // put that thing back where it came from! 
+ running_task.put_runtime(running_green_task as ~Runtime); + Local::put(running_task); + } + } + None => { + self.reawaken_remotely(); + Local::put(running_task); + } + } + } + + fn spawn_sibling(mut ~self, cur_task: ~Task, opts: TaskOpts, f: proc()) { + self.put_task(cur_task); + + let TaskOpts { + watched: _watched, + notify_chan, name, stack_size + } = opts; + + // Spawns a task into the current scheduler. We allocate the new task's + // stack from the scheduler's stack pool, and then configure it + // accordingly to `opts`. Afterwards we bootstrap it immediately by + // switching to it. + // + // Upon returning, our task is back in TLS and we're good to return. + let mut sched = self.sched.take_unwrap(); + let mut sibling = GreenTask::new(&mut sched.stack_pool, stack_size, f); + let mut sibling_task = ~Task::new(); + sibling_task.name = name; + match notify_chan { + Some(chan) => { + let on_exit = proc(task_result) { chan.send(task_result) }; + sibling_task.death.on_exit = Some(on_exit); + } + None => {} + } + + sibling.task = Some(sibling_task); + sched.run_task(self, sibling) + } + + // Local I/O is provided by the scheduler's event loop + fn local_io<'a>(&'a mut self) -> Option> { + match self.sched.get_mut_ref().event_loop.io() { + Some(io) => Some(rtio::LocalIo::new(io)), + None => None, + } + } + + fn wrap(~self) -> ~Any { self as ~Any } +} + +impl Drop for GreenTask { + fn drop(&mut self) { + unsafe { self.nasty_deschedule_lock.destroy(); } + } +} + +#[cfg(test)] +mod test { + + #[test] + fn local_heap() { + do run_in_newsched_task() { + let a = @5; + let b = a; + assert!(*a == 5); + assert!(*b == 5); + } + } + + #[test] + fn tls() { + use std::local_data; + do run_in_newsched_task() { + local_data_key!(key: @~str) + local_data::set(key, @~"data"); + assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data"); + local_data_key!(key2: @~str) + local_data::set(key2, @~"data"); + assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == 
~"data"); + } + } + + #[test] + fn unwind() { + do run_in_newsched_task() { + let result = spawntask_try(proc()()); + rtdebug!("trying first assert"); + assert!(result.is_ok()); + let result = spawntask_try(proc() fail!()); + rtdebug!("trying second assert"); + assert!(result.is_err()); + } + } + + #[test] + fn rng() { + do run_in_uv_task() { + use std::rand::{rng, Rng}; + let mut r = rng(); + let _ = r.next_u32(); + } + } + + #[test] + fn logging() { + do run_in_uv_task() { + info!("here i am. logging in a newsched task"); + } + } + + #[test] + fn comm_stream() { + do run_in_newsched_task() { + let (port, chan) = Chan::new(); + chan.send(10); + assert!(port.recv() == 10); + } + } + + #[test] + fn comm_shared_chan() { + do run_in_newsched_task() { + let (port, chan) = SharedChan::new(); + chan.send(10); + assert!(port.recv() == 10); + } + } + + //#[test] + //fn heap_cycles() { + // use std::option::{Option, Some, None}; + + // do run_in_newsched_task { + // struct List { + // next: Option<@mut List>, + // } + + // let a = @mut List { next: None }; + // let b = @mut List { next: Some(a) }; + + // a.next = Some(b); + // } + //} + + #[test] + #[should_fail] + fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) } +} diff --git a/src/libstd/rt/basic.rs b/src/libstd/rt/basic.rs deleted file mode 100644 index 3589582357c..00000000000 --- a/src/libstd/rt/basic.rs +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! This is a basic event loop implementation not meant for any "real purposes" -//! other than testing the scheduler and proving that it's possible to have a -//! pluggable event loop. 
- -use prelude::*; - -use cast; -use rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback, - Callback}; -use unstable::sync::Exclusive; -use io::native; -use util; - -/// This is the only exported function from this module. -pub fn event_loop() -> ~EventLoop { - ~BasicLoop::new() as ~EventLoop -} - -struct BasicLoop { - work: ~[proc()], // pending work - idle: Option<*mut BasicPausable>, // only one is allowed - remotes: ~[(uint, ~Callback)], - next_remote: uint, - messages: Exclusive<~[Message]>, - io: ~IoFactory, -} - -enum Message { RunRemote(uint), RemoveRemote(uint) } - -impl BasicLoop { - fn new() -> BasicLoop { - BasicLoop { - work: ~[], - idle: None, - next_remote: 0, - remotes: ~[], - messages: Exclusive::new(~[]), - io: ~native::IoFactory as ~IoFactory, - } - } - - /// Process everything in the work queue (continually) - fn work(&mut self) { - while self.work.len() > 0 { - for work in util::replace(&mut self.work, ~[]).move_iter() { - work(); - } - } - } - - fn remote_work(&mut self) { - let messages = unsafe { - self.messages.with(|messages| { - if messages.len() > 0 { - Some(util::replace(messages, ~[])) - } else { - None - } - }) - }; - let messages = match messages { - Some(m) => m, None => return - }; - for message in messages.iter() { - self.message(*message); - } - } - - fn message(&mut self, message: Message) { - match message { - RunRemote(i) => { - match self.remotes.mut_iter().find(|& &(id, _)| id == i) { - Some(&(_, ref mut f)) => f.call(), - None => unreachable!() - } - } - RemoveRemote(i) => { - match self.remotes.iter().position(|&(id, _)| id == i) { - Some(i) => { self.remotes.remove(i); } - None => unreachable!() - } - } - } - } - - /// Run the idle callback if one is registered - fn idle(&mut self) { - unsafe { - match self.idle { - Some(idle) => { - if (*idle).active { - (*idle).work.call(); - } - } - None => {} - } - } - } - - fn has_idle(&self) -> bool { - unsafe { self.idle.is_some() && (**self.idle.get_ref()).active 
} - } -} - -impl EventLoop for BasicLoop { - fn run(&mut self) { - // Not exactly efficient, but it gets the job done. - while self.remotes.len() > 0 || self.work.len() > 0 || self.has_idle() { - - self.work(); - self.remote_work(); - - if self.has_idle() { - self.idle(); - continue - } - - unsafe { - // We block here if we have no messages to process and we may - // receive a message at a later date - self.messages.hold_and_wait(|messages| { - self.remotes.len() > 0 && - messages.len() == 0 && - self.work.len() == 0 - }) - } - } - } - - fn callback(&mut self, f: proc()) { - self.work.push(f); - } - - // XXX: Seems like a really weird requirement to have an event loop provide. - fn pausable_idle_callback(&mut self, cb: ~Callback) -> ~PausableIdleCallback { - let callback = ~BasicPausable::new(self, cb); - rtassert!(self.idle.is_none()); - unsafe { - let cb_ptr: &*mut BasicPausable = cast::transmute(&callback); - self.idle = Some(*cb_ptr); - } - return callback as ~PausableIdleCallback; - } - - fn remote_callback(&mut self, f: ~Callback) -> ~RemoteCallback { - let id = self.next_remote; - self.next_remote += 1; - self.remotes.push((id, f)); - ~BasicRemote::new(self.messages.clone(), id) as ~RemoteCallback - } - - fn io<'a>(&'a mut self) -> Option<&'a mut IoFactory> { - let factory: &mut IoFactory = self.io; - Some(factory) - } -} - -struct BasicRemote { - queue: Exclusive<~[Message]>, - id: uint, -} - -impl BasicRemote { - fn new(queue: Exclusive<~[Message]>, id: uint) -> BasicRemote { - BasicRemote { queue: queue, id: id } - } -} - -impl RemoteCallback for BasicRemote { - fn fire(&mut self) { - unsafe { - self.queue.hold_and_signal(|queue| { - queue.push(RunRemote(self.id)); - }) - } - } -} - -impl Drop for BasicRemote { - fn drop(&mut self) { - unsafe { - self.queue.hold_and_signal(|queue| { - queue.push(RemoveRemote(self.id)); - }) - } - } -} - -struct BasicPausable { - eloop: *mut BasicLoop, - work: ~Callback, - active: bool, -} - -impl BasicPausable { - fn 
new(eloop: &mut BasicLoop, cb: ~Callback) -> BasicPausable { - BasicPausable { - active: false, - work: cb, - eloop: eloop, - } - } -} - -impl PausableIdleCallback for BasicPausable { - fn pause(&mut self) { - self.active = false; - } - fn resume(&mut self) { - self.active = true; - } -} - -impl Drop for BasicPausable { - fn drop(&mut self) { - unsafe { - (*self.eloop).idle = None; - } - } -} diff --git a/src/libstd/rt/borrowck.rs b/src/libstd/rt/borrowck.rs index 423981d9e91..d1e97cb6ec0 100644 --- a/src/libstd/rt/borrowck.rs +++ b/src/libstd/rt/borrowck.rs @@ -12,9 +12,8 @@ use c_str::{ToCStr, CString}; use libc::{c_char, size_t}; use option::{Option, None, Some}; use ptr::RawPtr; -use rt::env; +use rt; use rt::local::Local; -use rt::task; use rt::task::Task; use str::OwnedStr; use str; @@ -62,7 +61,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t) match try_take_task_borrow_list() { None => { // not recording borrows let msg = "borrowed"; - msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line)) + msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line)) } Some(borrow_list) => { // recording borrows let mut msg = ~"borrowed"; @@ -76,7 +75,7 @@ unsafe fn fail_borrowed(alloc: *mut raw::Box<()>, file: *c_char, line: size_t) sep = " and at "; } } - msg.with_c_str(|msg_p| task::begin_unwind_raw(msg_p, file, line)) + msg.with_c_str(|msg_p| rt::begin_unwind_raw(msg_p, file, line)) } } } @@ -95,7 +94,7 @@ unsafe fn debug_borrow>(tag: &'static str, //! A useful debugging function that prints a pointer + tag + newline //! without allocating memory. 
- if ENABLE_DEBUG && env::debug_borrow() { + if ENABLE_DEBUG && rt::env::debug_borrow() { debug_borrow_slow(tag, p, old_bits, new_bits, filename, line); } @@ -180,7 +179,7 @@ pub unsafe fn unrecord_borrow(a: *u8, if br.alloc != a || br.file != file || br.line != line { let err = format!("wrong borrow found, br={:?}", br); err.with_c_str(|msg_p| { - task::begin_unwind_raw(msg_p, file, line) + rt::begin_unwind_raw(msg_p, file, line) }) } borrow_list diff --git a/src/libstd/rt/context.rs b/src/libstd/rt/context.rs deleted file mode 100644 index 31cf0696881..00000000000 --- a/src/libstd/rt/context.rs +++ /dev/null @@ -1,463 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use option::*; -use super::stack::StackSegment; -use libc::c_void; -use uint; -use cast::{transmute, transmute_mut_unsafe, - transmute_region, transmute_mut_region}; - -pub static RED_ZONE: uint = 20 * 1024; - -// FIXME #7761: Registers is boxed so that it is 16-byte aligned, for storing -// SSE regs. It would be marginally better not to do this. In C++ we -// use an attribute on a struct. -// FIXME #7761: It would be nice to define regs as `~Option` since -// the registers are sometimes empty, but the discriminant would -// then misalign the regs again. 
-pub struct Context { - /// The context entry point, saved here for later destruction - priv start: Option<~proc()>, - /// Hold the registers while the task or scheduler is suspended - priv regs: ~Registers, - /// Lower bound and upper bound for the stack - priv stack_bounds: Option<(uint, uint)>, -} - -impl Context { - pub fn empty() -> Context { - Context { - start: None, - regs: new_regs(), - stack_bounds: None, - } - } - - /// Create a new context that will resume execution by running proc() - pub fn new(start: proc(), stack: &mut StackSegment) -> Context { - // FIXME #7767: Putting main into a ~ so it's a thin pointer and can - // be passed to the spawn function. Another unfortunate - // allocation - let start = ~start; - - // The C-ABI function that is the task entry point - extern fn task_start_wrapper(f: &proc()) { - // XXX(pcwalton): This may be sketchy. - unsafe { - let f: &|| = transmute(f); - (*f)() - } - } - - let fp: *c_void = task_start_wrapper as *c_void; - let argp: *c_void = unsafe { transmute::<&proc(), *c_void>(&*start) }; - let sp: *uint = stack.end(); - let sp: *mut uint = unsafe { transmute_mut_unsafe(sp) }; - // Save and then immediately load the current context, - // which we will then modify to call the given function when restored - let mut regs = new_regs(); - unsafe { - rust_swap_registers(transmute_mut_region(&mut *regs), transmute_region(&*regs)); - }; - - initialize_call_frame(&mut *regs, fp, argp, sp); - - // Scheduler tasks don't have a stack in the "we allocated it" sense, - // but rather they run on pthreads stacks. We have complete control over - // them in terms of the code running on them (and hopefully they don't - // overflow). Additionally, their coroutine stacks are listed as being - // zero-length, so that's how we detect what's what here. 
- let stack_base: *uint = stack.start(); - let bounds = if sp as uint == stack_base as uint { - None - } else { - Some((stack_base as uint, sp as uint)) - }; - return Context { - start: Some(start), - regs: regs, - stack_bounds: bounds, - } - } - - /* Switch contexts - - Suspend the current execution context and resume another by - saving the registers values of the executing thread to a Context - then loading the registers from a previously saved Context. - */ - pub fn swap(out_context: &mut Context, in_context: &Context) { - rtdebug!("swapping contexts"); - let out_regs: &mut Registers = match out_context { - &Context { regs: ~ref mut r, .. } => r - }; - let in_regs: &Registers = match in_context { - &Context { regs: ~ref r, .. } => r - }; - - rtdebug!("noting the stack limit and doing raw swap"); - - unsafe { - // Right before we switch to the new context, set the new context's - // stack limit in the OS-specified TLS slot. This also means that - // we cannot call any more rust functions after record_stack_bounds - // returns because they would all likely fail due to the limit being - // invalid for the current task. Lucky for us `rust_swap_registers` - // is a C function so we don't have to worry about that! - match in_context.stack_bounds { - Some((lo, hi)) => record_stack_bounds(lo, hi), - // If we're going back to one of the original contexts or - // something that's possibly not a "normal task", then reset - // the stack limit to 0 to make morestack never fail - None => record_stack_bounds(0, uint::max_value), - } - rust_swap_registers(out_regs, in_regs) - } - } -} - -extern { - fn rust_swap_registers(out_regs: *mut Registers, in_regs: *Registers); -} - -// Register contexts used in various architectures -// -// These structures all represent a context of one task throughout its -// execution. Each struct is a representation of the architecture's register -// set. 
When swapping between tasks, these register sets are used to save off -// the current registers into one struct, and load them all from another. -// -// Note that this is only used for context switching, which means that some of -// the registers may go unused. For example, for architectures with -// callee/caller saved registers, the context will only reflect the callee-saved -// registers. This is because the caller saved registers are already stored -// elsewhere on the stack (if it was necessary anyway). -// -// Additionally, there may be fields on various architectures which are unused -// entirely because they only reflect what is theoretically possible for a -// "complete register set" to show, but user-space cannot alter these registers. -// An example of this would be the segment selectors for x86. -// -// These structures/functions are roughly in-sync with the source files inside -// of src/rt/arch/$arch. The only currently used function from those folders is -// the `rust_swap_registers` function, but that's only because for now segmented -// stacks are disabled. 
- -#[cfg(target_arch = "x86")] -struct Registers { - eax: u32, ebx: u32, ecx: u32, edx: u32, - ebp: u32, esi: u32, edi: u32, esp: u32, - cs: u16, ds: u16, ss: u16, es: u16, fs: u16, gs: u16, - eflags: u32, eip: u32 -} - -#[cfg(target_arch = "x86")] -fn new_regs() -> ~Registers { - ~Registers { - eax: 0, ebx: 0, ecx: 0, edx: 0, - ebp: 0, esi: 0, edi: 0, esp: 0, - cs: 0, ds: 0, ss: 0, es: 0, fs: 0, gs: 0, - eflags: 0, eip: 0 - } -} - -#[cfg(target_arch = "x86")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - - let sp = align_down(sp); - let sp = mut_offset(sp, -4); - - unsafe { *sp = arg as uint }; - let sp = mut_offset(sp, -1); - unsafe { *sp = 0 }; // The final return address - - regs.esp = sp as u32; - regs.eip = fptr as u32; - - // Last base pointer on the stack is 0 - regs.ebp = 0; -} - -// windows requires saving more registers (both general and XMM), so the windows -// register context must be larger. -#[cfg(windows, target_arch = "x86_64")] -type Registers = [uint, ..34]; -#[cfg(not(windows), target_arch = "x86_64")] -type Registers = [uint, ..22]; - -#[cfg(windows, target_arch = "x86_64")] -fn new_regs() -> ~Registers { ~([0, .. 34]) } -#[cfg(not(windows), target_arch = "x86_64")] -fn new_regs() -> ~Registers { ~([0, .. 22]) } - -#[cfg(target_arch = "x86_64")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - - // Redefinitions from rt/arch/x86_64/regs.h - static RUSTRT_ARG0: uint = 3; - static RUSTRT_RSP: uint = 1; - static RUSTRT_IP: uint = 8; - static RUSTRT_RBP: uint = 2; - - let sp = align_down(sp); - let sp = mut_offset(sp, -1); - - // The final return address. 
0 indicates the bottom of the stack - unsafe { *sp = 0; } - - rtdebug!("creating call frame"); - rtdebug!("fptr {}", fptr); - rtdebug!("arg {}", arg); - rtdebug!("sp {}", sp); - - regs[RUSTRT_ARG0] = arg as uint; - regs[RUSTRT_RSP] = sp as uint; - regs[RUSTRT_IP] = fptr as uint; - - // Last base pointer on the stack should be 0 - regs[RUSTRT_RBP] = 0; -} - -#[cfg(target_arch = "arm")] -type Registers = [uint, ..32]; - -#[cfg(target_arch = "arm")] -fn new_regs() -> ~Registers { ~([0, .. 32]) } - -#[cfg(target_arch = "arm")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - let sp = align_down(sp); - // sp of arm eabi is 8-byte aligned - let sp = mut_offset(sp, -2); - - // The final return address. 0 indicates the bottom of the stack - unsafe { *sp = 0; } - - regs[0] = arg as uint; // r0 - regs[13] = sp as uint; // #53 sp, r13 - regs[14] = fptr as uint; // #60 pc, r15 --> lr -} - -#[cfg(target_arch = "mips")] -type Registers = [uint, ..32]; - -#[cfg(target_arch = "mips")] -fn new_regs() -> ~Registers { ~([0, .. 32]) } - -#[cfg(target_arch = "mips")] -fn initialize_call_frame(regs: &mut Registers, fptr: *c_void, arg: *c_void, - sp: *mut uint) { - let sp = align_down(sp); - // sp of mips o32 is 8-byte aligned - let sp = mut_offset(sp, -2); - - // The final return address. 
0 indicates the bottom of the stack - unsafe { *sp = 0; } - - regs[4] = arg as uint; - regs[29] = sp as uint; - regs[25] = fptr as uint; - regs[31] = fptr as uint; -} - -fn align_down(sp: *mut uint) -> *mut uint { - unsafe { - let sp: uint = transmute(sp); - let sp = sp & !(16 - 1); - transmute::(sp) - } -} - -// ptr::mut_offset is positive ints only -#[inline] -pub fn mut_offset(ptr: *mut T, count: int) -> *mut T { - use mem::size_of; - (ptr as int + count * (size_of::() as int)) as *mut T -} - -#[inline(always)] -pub unsafe fn record_stack_bounds(stack_lo: uint, stack_hi: uint) { - // When the old runtime had segmented stacks, it used a calculation that was - // "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic - // symbol resolution, llvm function calls, etc. In theory this red zone - // value is 0, but it matters far less when we have gigantic stacks because - // we don't need to be so exact about our stack budget. The "fudge factor" - // was because LLVM doesn't emit a stack check for functions < 256 bytes in - // size. Again though, we have giant stacks, so we round all these - // calculations up to the nice round number of 20k. - record_sp_limit(stack_lo + RED_ZONE); - - return target_record_stack_bounds(stack_lo, stack_hi); - - #[cfg(not(windows))] #[cfg(not(target_arch = "x86_64"))] #[inline(always)] - unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {} - #[cfg(windows, target_arch = "x86_64")] #[inline(always)] - unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) { - // Windows compiles C functions which may check the stack bounds. This - // means that if we want to perform valid FFI on windows, then we need - // to ensure that the stack bounds are what they truly are for this - // task. 
More info can be found at: - // https://github.com/mozilla/rust/issues/3445#issuecomment-26114839 - // - // stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom) - asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile"); - asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile"); - } -} - -/// Records the current limit of the stack as specified by `end`. -/// -/// This is stored in an OS-dependent location, likely inside of the thread -/// local storage. The location that the limit is stored is a pre-ordained -/// location because it's where LLVM has emitted code to check. -/// -/// Note that this cannot be called under normal circumstances. This function is -/// changing the stack limit, so upon returning any further function calls will -/// possibly be triggering the morestack logic if you're not careful. -/// -/// Also note that this and all of the inside functions are all flagged as -/// "inline(always)" because they're messing around with the stack limits. This -/// would be unfortunate for the functions themselves to trigger a morestack -/// invocation (if they were an actual function call). 
-#[inline(always)] -pub unsafe fn record_sp_limit(limit: uint) { - return target_record_sp_limit(limit); - - // x86-64 - #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $$0x60+90*8, %rsi - movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block - // store this inside of the "arbitrary data slot", but double the size - // because this is 64 bit instead of 32 bit - asm!("movq $0, %gs:0x28" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile") - } - - // x86 - #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movl $$0x48+90*4, %eax - movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile") - } - #[cfg(target_arch = "x86", target_os = "linux")] - #[cfg(target_arch = "x86", target_os = "freebsd")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile") - } - #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - // see: http://en.wikipedia.org/wiki/Win32_Thread_Information_Block - // store this inside of the "arbitrary data slot" - asm!("movl $0, %fs:0x14" :: "r"(limit) :: "volatile") - } - - // mips, arm - Some brave soul can port these to inline asm, but it's over - // my head personally - #[cfg(target_arch = "mips")] - #[cfg(target_arch = "arm")] 
#[inline(always)] - unsafe fn target_record_sp_limit(limit: uint) { - return record_sp_limit(limit as *c_void); - extern { - fn record_sp_limit(limit: *c_void); - } - } -} - -/// The counterpart of the function above, this function will fetch the current -/// stack limit stored in TLS. -/// -/// Note that all of these functions are meant to be exact counterparts of their -/// brethren above, except that the operands are reversed. -/// -/// As with the setter, this function does not have a __morestack header and can -/// therefore be called in a "we're out of stack" situation. -#[inline(always)] -// currently only called by `rust_stack_exhausted`, which doesn't -// exist in a test build. -#[cfg(not(test))] -pub unsafe fn get_sp_limit() -> uint { - return target_get_sp_limit(); - - // x86-64 - #[cfg(target_arch = "x86_64", target_os = "macos")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq $$0x60+90*8, %rsi - movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "linux")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "win32")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %gs:0x28, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86_64", target_os = "freebsd")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - - // x86 - #[cfg(target_arch = "x86", target_os = "macos")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl $$0x48+90*4, %eax - movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile"); - return limit; - } - #[cfg(target_arch = "x86", target_os = "linux")] - #[cfg(target_arch = "x86", 
target_os = "freebsd")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - #[cfg(target_arch = "x86", target_os = "win32")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - let limit; - asm!("movl %fs:0x14, $0" : "=r"(limit) ::: "volatile"); - return limit; - } - - // mips, arm - Some brave soul can port these to inline asm, but it's over - // my head personally - #[cfg(target_arch = "mips")] - #[cfg(target_arch = "arm")] #[inline(always)] - unsafe fn target_get_sp_limit() -> uint { - return get_sp_limit() as uint; - extern { - fn get_sp_limit() -> *c_void; - } - } -} diff --git a/src/libstd/rt/env.rs b/src/libstd/rt/env.rs index d1bd450afe2..f3fa482b18c 100644 --- a/src/libstd/rt/env.rs +++ b/src/libstd/rt/env.rs @@ -17,7 +17,7 @@ use os; // Note that these are all accessed without any synchronization. // They are expected to be initialized once then left alone. -static mut MIN_STACK: uint = 2000000; +static mut MIN_STACK: uint = 2 * 1024 * 1024; static mut DEBUG_BORROW: bool = false; static mut POISON_ON_FREE: bool = false; diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs deleted file mode 100644 index f4f128cf5aa..00000000000 --- a/src/libstd/rt/kill.rs +++ /dev/null @@ -1,318 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - -Task death: asynchronous killing, linked failure, exit code propagation. - -This file implements two orthogonal building-blocks for communicating failure -between tasks. 
One is 'linked failure' or 'task killing', that is, a failing -task causing other tasks to fail promptly (even those that are blocked on -pipes or I/O). The other is 'exit code propagation', which affects the result -observed by the parent of a task::try task that itself spawns child tasks -(such as any #[test] function). In both cases the data structures live in -KillHandle. - - -I. Task killing. - -The model for killing involves two atomic flags, the "kill flag" and the -"unkillable flag". Operations on the kill flag include: - -- In the taskgroup code (task/spawn.rs), tasks store a clone of their - KillHandle in their shared taskgroup. Another task in the group that fails - will use that handle to call kill(). -- When a task blocks, it turns its ~Task into a BlockedTask by storing a - the transmuted ~Task pointer inside the KillHandle's kill flag. A task - trying to block and a task trying to kill it can simultaneously access the - kill flag, after which the task will get scheduled and fail (no matter who - wins the race). Likewise, a task trying to wake a blocked task normally and - a task trying to kill it can simultaneously access the flag; only one will - get the task to reschedule it. - -Operations on the unkillable flag include: - -- When a task becomes unkillable, it swaps on the flag to forbid any killer - from waking it up while it's blocked inside the unkillable section. If a - kill was already pending, the task fails instead of becoming unkillable. -- When a task is done being unkillable, it restores the flag to the normal - running state. If a kill was received-but-blocked during the unkillable - section, the task fails at this later point. -- When a task tries to kill another task, before swapping on the kill flag, it - first swaps on the unkillable flag, to see if it's "allowed" to wake up the - task. If it isn't, the killed task will receive the signal when it becomes - killable again. (Of course, a task trying to wake the task normally (e.g. 
- sending on a channel) does not access the unkillable flag at all.) - -Why do we not need acquire/release barriers on any of the kill flag swaps? -This is because barriers establish orderings between accesses on different -memory locations, but each kill-related operation is only a swap on a single -location, so atomicity is all that matters. The exception is kill(), which -does a swap on both flags in sequence. kill() needs no barriers because it -does not matter if its two accesses are seen reordered on another CPU: if a -killer does perform both writes, it means it saw a KILL_RUNNING in the -unkillable flag, which means an unkillable task will see KILL_KILLED and fail -immediately (rendering the subsequent write to the kill flag unnecessary). - - -II. Exit code propagation. - -The basic model for exit code propagation, which is used with the "watched" -spawn mode (on by default for linked spawns, off for supervised and unlinked -spawns), is that a parent will wait for all its watched children to exit -before reporting whether it succeeded or failed. A watching parent will only -report success if it succeeded and all its children also reported success; -otherwise, it will report failure. This is most useful for writing test cases: - - ``` -#[test] -fn test_something_in_another_task { - do spawn { - assert!(collatz_conjecture_is_false()); - } -} - ``` - -Here, as the child task will certainly outlive the parent task, we might miss -the failure of the child when deciding whether or not the test case passed. -The watched spawn mode avoids this problem. - -In order to propagate exit codes from children to their parents, any -'watching' parent must wait for all of its children to exit before it can -report its final exit status. We achieve this by using an UnsafeArc, using the -reference counting to track how many children are still alive, and using the -unwrap() operation in the parent's exit path to wait for all children to exit. 
-The UnsafeArc referred to here is actually the KillHandle itself. - -This also works transitively, as if a "middle" watched child task is itself -watching a grandchild task, the "middle" task will do unwrap() on its own -KillHandle (thereby waiting for the grandchild to exit) before dropping its -reference to its watching parent (which will alert the parent). - -While UnsafeArc::unwrap() accomplishes the synchronization, there remains the -matter of reporting the exit codes themselves. This is easiest when an exiting -watched task has no watched children of its own: - -- If the task with no watched children exits successfully, it need do nothing. -- If the task with no watched children has failed, it sets a flag in the - parent's KillHandle ("any_child_failed") to false. It then stays false forever. - -However, if a "middle" watched task with watched children of its own exits -before its child exits, we need to ensure that the grandparent task may still -see a failure from the grandchild task. While we could achieve this by having -each intermediate task block on its handle, this keeps around the other resources -the task was using. To be more efficient, this is accomplished via "tombstones". - -A tombstone is a closure, proc() -> bool, which will perform any waiting necessary -to collect the exit code of descendant tasks. In its environment is captured -the KillHandle of whichever task created the tombstone, and perhaps also any -tombstones that that task itself had, and finally also another tombstone, -effectively creating a lazy-list of heap closures. - -When a child wishes to exit early and leave tombstones behind for its parent, -it must use a LittleLock (pthread mutex) to synchronize with any possible -sibling tasks which are trying to do the same thing with the same parent. 
-However, on the other side, when the parent is ready to pull on the tombstones, -it need not use this lock, because the unwrap() serves as a barrier that ensures -no children will remain with references to the handle. - -The main logic for creating and assigning tombstones can be found in the -function reparent_children_to() in the impl for KillHandle. - - -IIA. Issues with exit code propagation. - -There are two known issues with the current scheme for exit code propagation. - -- As documented in issue #8136, the structure mandates the possibility for stack - overflow when collecting tombstones that are very deeply nested. This cannot - be avoided with the closure representation, as tombstones end up structured in - a sort of tree. However, notably, the tombstones do not actually need to be - collected in any particular order, and so a doubly-linked list may be used. - However we do not do this yet because DList is in libextra. - -- A discussion with Graydon made me realize that if we decoupled the exit code - propagation from the parents-waiting action, this could result in a simpler - implementation as the exit codes themselves would not have to be propagated, - and could instead be propagated implicitly through the taskgroup mechanism - that we already have. The tombstoning scheme would still be required. I have - not implemented this because currently we can't receive a linked failure kill - signal during the task cleanup activity, as that is currently "unkillable", - and occurs outside the task's unwinder's "try" block, so would require some - restructuring. - -*/ - -use cast; -use option::{Option, Some, None}; -use prelude::*; -use iter; -use task::TaskResult; -use rt::task::Task; -use unstable::atomics::{AtomicUint, SeqCst}; -use unstable::sync::UnsafeArc; - -/// A handle to a blocked task. Usually this means having the ~Task pointer by -/// ownership, but if the task is killable, a killer can steal it at any time. 
-pub enum BlockedTask { - Owned(~Task), - Shared(UnsafeArc), -} - -/// Per-task state related to task death, killing, failure, etc. -pub struct Death { - // Action to be done with the exit code. If set, also makes the task wait - // until all its watched children exit before collecting the status. - on_exit: Option, - // nesting level counter for unstable::atomically calls (0 == can deschedule). - priv wont_sleep: int, -} - -pub struct BlockedTaskIterator { - priv inner: UnsafeArc, -} - -impl Iterator for BlockedTaskIterator { - fn next(&mut self) -> Option { - Some(Shared(self.inner.clone())) - } -} - -impl BlockedTask { - /// Returns Some if the task was successfully woken; None if already killed. - pub fn wake(self) -> Option<~Task> { - match self { - Owned(task) => Some(task), - Shared(arc) => unsafe { - match (*arc.get()).swap(0, SeqCst) { - 0 => None, - n => cast::transmute(n), - } - } - } - } - - /// Create a blocked task, unless the task was already killed. - pub fn block(task: ~Task) -> BlockedTask { - Owned(task) - } - - /// Converts one blocked task handle to a list of many handles to the same. - pub fn make_selectable(self, num_handles: uint) - -> iter::Take - { - let arc = match self { - Owned(task) => { - let flag = unsafe { AtomicUint::new(cast::transmute(task)) }; - UnsafeArc::new(flag) - } - Shared(arc) => arc.clone(), - }; - BlockedTaskIterator{ inner: arc }.take(num_handles) - } - - // This assertion has two flavours because the wake involves an atomic op. - // In the faster version, destructors will fail dramatically instead. - #[inline] #[cfg(not(test))] - pub fn assert_already_awake(self) { } - #[inline] #[cfg(test)] - pub fn assert_already_awake(self) { assert!(self.wake().is_none()); } - - /// Convert to an unsafe uint value. Useful for storing in a pipe's state flag. 
- #[inline] - pub unsafe fn cast_to_uint(self) -> uint { - match self { - Owned(task) => { - let blocked_task_ptr: uint = cast::transmute(task); - rtassert!(blocked_task_ptr & 0x1 == 0); - blocked_task_ptr - } - Shared(arc) => { - let blocked_task_ptr: uint = cast::transmute(~arc); - rtassert!(blocked_task_ptr & 0x1 == 0); - blocked_task_ptr | 0x1 - } - } - } - - /// Convert from an unsafe uint value. Useful for retrieving a pipe's state flag. - #[inline] - pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask { - if blocked_task_ptr & 0x1 == 0 { - Owned(cast::transmute(blocked_task_ptr)) - } else { - let ptr: ~UnsafeArc = cast::transmute(blocked_task_ptr & !1); - Shared(*ptr) - } - } -} - -impl Death { - pub fn new() -> Death { - Death { - on_exit: None, - wont_sleep: 0, - } - } - - /// Collect failure exit codes from children and propagate them to a parent. - pub fn collect_failure(&mut self, result: TaskResult) { - match self.on_exit.take() { - Some(f) => f(result), - None => {} - } - } - - /// Enter a possibly-nested "atomic" section of code. Just for assertions. - /// All calls must be paired with a subsequent call to allow_deschedule. - #[inline] - pub fn inhibit_deschedule(&mut self) { - self.wont_sleep += 1; - } - - /// Exit a possibly-nested "atomic" section of code. Just for assertions. - /// All calls must be paired with a preceding call to inhibit_deschedule. - #[inline] - pub fn allow_deschedule(&mut self) { - rtassert!(self.wont_sleep != 0); - self.wont_sleep -= 1; - } - - /// Ensure that the task is allowed to become descheduled. - #[inline] - pub fn assert_may_sleep(&self) { - if self.wont_sleep != 0 { - rtabort!("illegal atomic-sleep: attempt to reschedule while \ - using an Exclusive or LittleLock"); - } - } -} - -impl Drop for Death { - fn drop(&mut self) { - // Mustn't be in an atomic or unkillable section at task death. 
- rtassert!(self.wont_sleep == 0); - } -} - -#[cfg(test)] -mod test { - use rt::test::*; - use super::*; - - // Task blocking tests - - #[test] - fn block_and_wake() { - do with_test_task |task| { - BlockedTask::block(task).wake().unwrap() - } - } -} diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index d73ad98a25b..ea27956ad90 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -8,8 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use option::{Option, Some, None}; -use rt::sched::Scheduler; +use option::Option; use rt::task::Task; use rt::local_ptr; @@ -46,82 +45,6 @@ impl Local> for Task { } } -/// Encapsulates a temporarily-borrowed scheduler. -pub struct BorrowedScheduler { - priv task: local_ptr::Borrowed, -} - -impl BorrowedScheduler { - fn new(mut task: local_ptr::Borrowed) -> BorrowedScheduler { - if task.get().sched.is_none() { - rtabort!("no scheduler") - } else { - BorrowedScheduler { - task: task, - } - } - } - - #[inline] - pub fn get<'a>(&'a mut self) -> &'a mut ~Scheduler { - match self.task.get().sched { - None => rtabort!("no scheduler"), - Some(ref mut sched) => sched, - } - } -} - -impl Local for Scheduler { - fn put(value: ~Scheduler) { - let mut task = Local::borrow(None::); - task.get().sched = Some(value); - } - #[inline] - fn take() -> ~Scheduler { - unsafe { - // XXX: Unsafe for speed - let task: *mut Task = Local::unsafe_borrow(); - (*task).sched.take_unwrap() - } - } - fn exists(_: Option) -> bool { - let mut task = Local::borrow(None::); - task.get().sched.is_some() - } - #[inline] - fn borrow(_: Option) -> BorrowedScheduler { - BorrowedScheduler::new(Local::borrow(None::)) - } - unsafe fn unsafe_take() -> ~Scheduler { rtabort!("unimpl") } - unsafe fn unsafe_borrow() -> *mut Scheduler { - let task: *mut Task = Local::unsafe_borrow(); - match (*task).sched { - Some(~ref mut sched) => { - let s: *mut Scheduler = &mut *sched; - return s; - } - None => 
{ - rtabort!("no scheduler") - } - } - } - unsafe fn try_unsafe_borrow() -> Option<*mut Scheduler> { - let task_opt: Option<*mut Task> = Local::try_unsafe_borrow(); - match task_opt { - Some(task) => { - match (*task).sched { - Some(~ref mut sched) => { - let s: *mut Scheduler = &mut *sched; - Some(s) - } - None => None - } - } - None => None - } - } -} - #[cfg(test)] mod test { use option::None; diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index be35e7579b7..d0c062c1274 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -57,27 +57,17 @@ Several modules in `core` are clients of `rt`: // XXX: this should not be here. #[allow(missing_doc)]; +use any::Any; use clone::Clone; use container::Container; use iter::Iterator; -use option::{Option, None, Some}; +use option::Option; use ptr::RawPtr; -use rt::local::Local; -use rt::sched::{Scheduler, Shutdown}; -use rt::sleeper_list::SleeperList; -use task::TaskResult; -use rt::task::{Task, SchedTask, GreenTask, Sched}; -use send_str::SendStrStatic; -use unstable::atomics::{AtomicInt, AtomicBool, SeqCst}; -use unstable::sync::UnsafeArc; +use result::Result; +use task::TaskOpts; use vec::{OwnedVector, MutableVector, ImmutableVector}; -use vec; -use self::thread::Thread; - -// the os module needs to reach into this helper, so allow general access -// through this reexport. -pub use self::util::set_exit_status; +use self::task::{Task, BlockedTask}; // this is somewhat useful when a program wants to spawn a "reasonable" number // of workers based on the constraints of the system that it's running on. @@ -85,8 +75,8 @@ pub use self::util::set_exit_status; // method... pub use self::util::default_sched_threads; -// Re-export of the functionality in the kill module -pub use self::kill::BlockedTask; +// Export unwinding facilities used by the failure macros +pub use self::unwind::{begin_unwind, begin_unwind_raw}; // XXX: these probably shouldn't be public... 
#[doc(hidden)] @@ -99,21 +89,12 @@ pub mod shouldnt_be_public { // Internal macros used by the runtime. mod macros; -/// Basic implementation of an EventLoop, provides no I/O interfaces -mod basic; - /// The global (exchange) heap. pub mod global_heap; /// Implementations of language-critical runtime features like @. pub mod task; -/// Facilities related to task failure, killing, and death. -mod kill; - -/// The coroutine task scheduler, built on the `io` event loop. -pub mod sched; - /// The EventLoop and internal synchronous I/O interface. pub mod rtio; @@ -121,27 +102,6 @@ pub mod rtio; /// or task-local storage. pub mod local; -/// A mostly lock-free multi-producer, single consumer queue. -pub mod mpsc_queue; - -/// A lock-free single-producer, single consumer queue. -pub mod spsc_queue; - -/// A lock-free multi-producer, multi-consumer bounded queue. -mod mpmc_bounded_queue; - -/// A parallel work-stealing deque -pub mod deque; - -/// A parallel data structure for tracking sleeping schedulers. -pub mod sleeper_list; - -/// Stack segments and caching. -pub mod stack; - -/// CPU context swapping. -mod context; - /// Bindings to system threading libraries. pub mod thread; @@ -157,16 +117,6 @@ pub mod logging; /// Crate map pub mod crate_map; -/// Tools for testing the runtime -pub mod test; - -/// Reference counting -pub mod rc; - -/// A simple single-threaded channel type for passing buffered data between -/// scheduler and task context -pub mod tube; - /// The runtime needs to be able to put a pointer into thread-local storage. mod local_ptr; diff --git a/src/libstd/rt/rc.rs b/src/libstd/rt/rc.rs deleted file mode 100644 index 2699dab6d38..00000000000 --- a/src/libstd/rt/rc.rs +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! An owned, task-local, reference counted type -//! -//! # Safety note -//! -//! XXX There is currently no type-system mechanism for enforcing that -//! reference counted types are both allocated on the exchange heap -//! and also non-sendable -//! -//! This doesn't prevent borrowing multiple aliasable mutable pointers - -use ops::Drop; -use clone::Clone; -use libc::c_void; -use cast; - -pub struct RC { - priv p: *c_void // ~(uint, T) -} - -impl RC { - pub fn new(val: T) -> RC { - unsafe { - let v = ~(1, val); - let p: *c_void = cast::transmute(v); - RC { p: p } - } - } - - fn get_mut_state(&mut self) -> *mut (uint, T) { - unsafe { - let p: &mut ~(uint, T) = cast::transmute(&mut self.p); - let p: *mut (uint, T) = &mut **p; - return p; - } - } - - fn get_state(&self) -> *(uint, T) { - unsafe { - let p: &~(uint, T) = cast::transmute(&self.p); - let p: *(uint, T) = &**p; - return p; - } - } - - pub fn unsafe_borrow_mut(&mut self) -> *mut T { - unsafe { - match *self.get_mut_state() { - (_, ref mut p) => { - let p: *mut T = p; - return p; - } - } - } - } - - pub fn refcount(&self) -> uint { - unsafe { - match *self.get_state() { - (count, _) => count - } - } - } -} - -#[unsafe_destructor] -impl Drop for RC { - fn drop(&mut self) { - assert!(self.refcount() > 0); - - unsafe { - match *self.get_mut_state() { - (ref mut count, _) => { - *count = *count - 1 - } - } - - if self.refcount() == 0 { - let _: ~(uint, T) = cast::transmute(self.p); - } - } - } -} - -impl Clone for RC { - fn clone(&self) -> RC { - unsafe { - // XXX: Mutable clone - let this: &mut RC = cast::transmute_mut(self); - - match *this.get_mut_state() { - (ref mut count, _) => { - *count = *count + 1; - } - } - } - - RC { p: self.p } - } -} - -#[cfg(test)] -mod test { - use super::RC; - - #[test] - fn smoke_test() 
{ - unsafe { - let mut v1 = RC::new(100); - assert!(*v1.unsafe_borrow_mut() == 100); - assert!(v1.refcount() == 1); - - let mut v2 = v1.clone(); - assert!(*v2.unsafe_borrow_mut() == 100); - assert!(v2.refcount() == 2); - - *v2.unsafe_borrow_mut() = 200; - assert!(*v2.unsafe_borrow_mut() == 200); - assert!(*v1.unsafe_borrow_mut() == 200); - - let v3 = v2.clone(); - assert!(v3.refcount() == 3); - { - let _v1 = v1; - let _v2 = v2; - } - assert!(v3.refcount() == 1); - } - } -} diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index 7207c1a8134..c1c40cc6dff 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -14,14 +14,15 @@ use comm::{SharedChan, Port}; use libc::c_int; use libc; use ops::Drop; -use option::*; +use option::{Option, Some, None}; use path::Path; -use result::*; +use result::{Result, Ok, Err}; +use rt::task::Task; +use rt::local::Local; use ai = io::net::addrinfo; +use io; use io::IoError; -use io::native::NATIVE_IO_FACTORY; -use io::native; use io::net::ip::{IpAddr, SocketAddr}; use io::process::{ProcessConfig, ProcessExit}; use io::signal::Signum; @@ -149,6 +150,8 @@ impl<'a> LocalIo<'a> { } pub trait IoFactory { + fn id(&self) -> uint; + // networking fn tcp_connect(&mut self, addr: SocketAddr) -> Result<~RtioTcpStream, IoError>; fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError>; diff --git a/src/libstd/rt/sched.rs b/src/libstd/rt/sched.rs deleted file mode 100644 index 15aa1602cd0..00000000000 --- a/src/libstd/rt/sched.rs +++ /dev/null @@ -1,1395 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use option::{Option, Some, None}; -use cast::{transmute, transmute_mut_region, transmute_mut_unsafe}; -use clone::Clone; -use unstable::raw; -use super::sleeper_list::SleeperList; -use super::stack::{StackPool}; -use super::rtio::EventLoop; -use super::context::Context; -use super::task::{Task, AnySched, Sched}; -use rt::kill::BlockedTask; -use rt::deque; -use rt::local_ptr; -use rt::local::Local; -use rt::rtio::{RemoteCallback, PausableIdleCallback, Callback}; -use borrow::{to_uint}; -use rand::{XorShiftRng, Rng, Rand}; -use iter::range; -use unstable::mutex::Mutex; -use vec::{OwnedVector}; - -use mpsc = super::mpsc_queue; - -/// A scheduler is responsible for coordinating the execution of Tasks -/// on a single thread. The scheduler runs inside a slightly modified -/// Rust Task. When not running this task is stored in the scheduler -/// struct. The scheduler struct acts like a baton, all scheduling -/// actions are transfers of the baton. -/// -/// XXX: This creates too many callbacks to run_sched_once, resulting -/// in too much allocation and too many events. -pub struct Scheduler { - /// There are N work queues, one per scheduler. - work_queue: deque::Worker<~Task>, - /// Work queues for the other schedulers. These are created by - /// cloning the core work queues. - work_queues: ~[deque::Stealer<~Task>], - /// The queue of incoming messages from other schedulers. - /// These are enqueued by SchedHandles after which a remote callback - /// is triggered to handle the message. - message_queue: mpsc::Consumer, - /// Producer used to clone sched handles from - message_producer: mpsc::Producer, - /// A shared list of sleeping schedulers. We'll use this to wake - /// up schedulers when pushing work onto the work queue. - sleeper_list: SleeperList, - /// Indicates that we have previously pushed a handle onto the - /// SleeperList but have not yet received the Wake message. 
- /// Being `true` does not necessarily mean that the scheduler is - /// not active since there are multiple event sources that may - /// wake the scheduler. It just prevents the scheduler from pushing - /// multiple handles onto the sleeper list. - sleepy: bool, - /// A flag to indicate we've received the shutdown message and should - /// no longer try to go to sleep, but exit instead. - no_sleep: bool, - stack_pool: StackPool, - /// The scheduler runs on a special task. When it is not running - /// it is stored here instead of the work queue. - sched_task: Option<~Task>, - /// An action performed after a context switch on behalf of the - /// code running before the context switch - cleanup_job: Option, - /// Should this scheduler run any task, or only pinned tasks? - run_anything: bool, - /// If the scheduler shouldn't run some tasks, a friend to send - /// them to. - friend_handle: Option, - /// A fast XorShift rng for scheduler use - rng: XorShiftRng, - /// A togglable idle callback - idle_callback: Option<~PausableIdleCallback>, - /// A countdown that starts at a random value and is decremented - /// every time a yield check is performed. When it hits 0 a task - /// will yield. - yield_check_count: uint, - /// A flag to tell the scheduler loop it needs to do some stealing - /// in order to introduce randomness as part of a yield - steal_for_yield: bool, - - // n.b. currently destructors of an object are run in top-to-bottom in order - // of field declaration. Due to its nature, the pausable idle callback - // must have some sort of handle to the event loop, so it needs to get - // destroyed before the event loop itself. For this reason, we destroy - // the event loop last to ensure that any unsafe references to it are - // destroyed before it's actually destroyed. 
- - /// The event loop used to drive the scheduler and perform I/O - event_loop: ~EventLoop, -} - -/// An indication of how hard to work on a given operation, the difference -/// mainly being whether memory is synchronized or not -#[deriving(Eq)] -enum EffortLevel { - DontTryTooHard, - GiveItYourBest -} - -static MAX_YIELD_CHECKS: uint = 20000; - -fn reset_yield_check(rng: &mut XorShiftRng) -> uint { - let r: uint = Rand::rand(rng); - r % MAX_YIELD_CHECKS + 1 -} - -impl Scheduler { - - // * Initialization Functions - - pub fn new(event_loop: ~EventLoop, - work_queue: deque::Worker<~Task>, - work_queues: ~[deque::Stealer<~Task>], - sleeper_list: SleeperList) - -> Scheduler { - - Scheduler::new_special(event_loop, work_queue, - work_queues, - sleeper_list, true, None) - - } - - pub fn new_special(event_loop: ~EventLoop, - work_queue: deque::Worker<~Task>, - work_queues: ~[deque::Stealer<~Task>], - sleeper_list: SleeperList, - run_anything: bool, - friend: Option) - -> Scheduler { - - let (consumer, producer) = mpsc::queue(()); - let mut sched = Scheduler { - sleeper_list: sleeper_list, - message_queue: consumer, - message_producer: producer, - sleepy: false, - no_sleep: false, - event_loop: event_loop, - work_queue: work_queue, - work_queues: work_queues, - stack_pool: StackPool::new(), - sched_task: None, - cleanup_job: None, - run_anything: run_anything, - friend_handle: friend, - rng: new_sched_rng(), - idle_callback: None, - yield_check_count: 0, - steal_for_yield: false - }; - - sched.yield_check_count = reset_yield_check(&mut sched.rng); - - return sched; - } - - // XXX: This may eventually need to be refactored so that - // the scheduler itself doesn't have to call event_loop.run. - // That will be important for embedding the runtime into external - // event loops. - - // Take a main task to run, and a scheduler to run it in. Create a - // scheduler task and bootstrap into it. - pub fn bootstrap(mut ~self, task: ~Task) { - - // Build an Idle callback. 
- let cb = ~SchedRunner as ~Callback; - self.idle_callback = Some(self.event_loop.pausable_idle_callback(cb)); - - // Initialize the TLS key. - local_ptr::init(); - - // Create a task for the scheduler with an empty context. - let sched_task = ~Task::new_sched_task(); - - // Now that we have an empty task struct for the scheduler - // task, put it in TLS. - Local::put(sched_task); - - // Before starting our first task, make sure the idle callback - // is active. As we do not start in the sleep state this is - // important. - self.idle_callback.get_mut_ref().resume(); - - // Now, as far as all the scheduler state is concerned, we are - // inside the "scheduler" context. So we can act like the - // scheduler and resume the provided task. - self.resume_task_immediately(task); - - // Now we are back in the scheduler context, having - // successfully run the input task. Start by running the - // scheduler. Grab it out of TLS - performing the scheduler - // action will have given it away. - let sched: ~Scheduler = Local::take(); - - rtdebug!("starting scheduler {}", sched.sched_id()); - sched.run(); - - // Close the idle callback. - let mut sched: ~Scheduler = Local::take(); - sched.idle_callback.take(); - // Make one go through the loop to run the close callback. - sched.run(); - - // Now that we are done with the scheduler, clean up the - // scheduler task. Do so by removing it from TLS and manually - // cleaning up the memory it uses. As we didn't actually call - // task.run() on the scheduler task we never get through all - // the cleanup code it runs. - let mut stask: ~Task = Local::take(); - - rtdebug!("stopping scheduler {}", stask.sched.get_ref().sched_id()); - - // Should not have any messages - let message = stask.sched.get_mut_ref().message_queue.pop(); - rtassert!(match message { mpsc::Empty => true, _ => false }); - - stask.destroyed = true; - } - - // This does not return a scheduler, as the scheduler is placed - // inside the task. 
- pub fn run(mut ~self) { - - // This is unsafe because we need to place the scheduler, with - // the event_loop inside, inside our task. But we still need a - // mutable reference to the event_loop to give it the "run" - // command. - unsafe { - let event_loop: *mut ~EventLoop = &mut self.event_loop; - - { - // Our scheduler must be in the task before the event loop - // is started. - let mut stask = Local::borrow(None::); - stask.get().sched = Some(self); - } - - (*event_loop).run(); - } - } - - // * Execution Functions - Core Loop Logic - - // The model for this function is that you continue through it - // until you either use the scheduler while performing a schedule - // action, in which case you give it away and return early, or - // you reach the end and sleep. In the case that a scheduler - // action is performed the loop is evented such that this function - // is called again. - fn run_sched_once() { - - // When we reach the scheduler context via the event loop we - // already have a scheduler stored in our local task, so we - // start off by taking it. This is the only path through the - // scheduler where we get the scheduler this way. - let mut sched: ~Scheduler = Local::take(); - - // Assume that we need to continue idling unless we reach the - // end of this function without performing an action. - sched.idle_callback.get_mut_ref().resume(); - - // First we check for scheduler messages, these are higher - // priority than regular tasks. - let sched = match sched.interpret_message_queue(DontTryTooHard) { - Some(sched) => sched, - None => return - }; - - // This helper will use a randomized work-stealing algorithm - // to find work. - let sched = match sched.do_work() { - Some(sched) => sched, - None => return - }; - - // Now, before sleeping we need to find out if there really - // were any messages. Give it your best! 
- let mut sched = match sched.interpret_message_queue(GiveItYourBest) { - Some(sched) => sched, - None => return - }; - - // If we got here then there was no work to do. - // Generate a SchedHandle and push it to the sleeper list so - // somebody can wake us up later. - if !sched.sleepy && !sched.no_sleep { - rtdebug!("scheduler has no work to do, going to sleep"); - sched.sleepy = true; - let handle = sched.make_handle(); - sched.sleeper_list.push(handle); - // Since we are sleeping, deactivate the idle callback. - sched.idle_callback.get_mut_ref().pause(); - } else { - rtdebug!("not sleeping, already doing so or no_sleep set"); - // We may not be sleeping, but we still need to deactivate - // the idle callback. - sched.idle_callback.get_mut_ref().pause(); - } - - // Finished a cycle without using the Scheduler. Place it back - // in TLS. - Local::put(sched); - } - - // This function returns None if the scheduler is "used", or it - // returns the still-available scheduler. At this point all - // message-handling will count as a turn of work, and as a result - // return None. - fn interpret_message_queue(mut ~self, effort: EffortLevel) -> Option<~Scheduler> { - - let msg = if effort == DontTryTooHard { - self.message_queue.casual_pop() - } else { - // When popping our message queue, we could see an "inconsistent" - // state which means that we *should* be able to pop data, but we - // are unable to at this time. Our options are: - // - // 1. Spin waiting for data - // 2. Ignore this and pretend we didn't find a message - // - // If we choose route 1, then if the pusher in question is currently - // pre-empted, we're going to take up our entire time slice just - // spinning on this queue. If we choose route 2, then the pusher in - // question is still guaranteed to make a send() on its async - // handle, so we will guaranteed wake up and see its message at some - // point. - // - // I have chosen to take route #2. 
- match self.message_queue.pop() { - mpsc::Data(t) => Some(t), - mpsc::Empty | mpsc::Inconsistent => None - } - }; - - match msg { - Some(PinnedTask(task)) => { - let mut task = task; - task.give_home(Sched(self.make_handle())); - self.resume_task_immediately(task); - return None; - } - Some(TaskFromFriend(task)) => { - rtdebug!("got a task from a friend. lovely!"); - self.process_task(task, Scheduler::resume_task_immediately_cl); - return None; - } - Some(RunOnce(task)) => { - // bypass the process_task logic to force running this task once - // on this home scheduler. This is often used for I/O (homing). - Scheduler::resume_task_immediately_cl(self, task); - return None; - } - Some(Wake) => { - self.sleepy = false; - Local::put(self); - return None; - } - Some(Shutdown) => { - rtdebug!("shutting down"); - if self.sleepy { - // There may be an outstanding handle on the - // sleeper list. Pop them all to make sure that's - // not the case. - loop { - match self.sleeper_list.pop() { - Some(handle) => { - let mut handle = handle; - handle.send(Wake); - } - None => break - } - } - } - // No more sleeping. After there are no outstanding - // event loop references we will shut down. - self.no_sleep = true; - self.sleepy = false; - Local::put(self); - return None; - } - None => { - return Some(self); - } - } - } - - fn do_work(mut ~self) -> Option<~Scheduler> { - rtdebug!("scheduler calling do work"); - match self.find_work() { - Some(task) => { - rtdebug!("found some work! processing the task"); - self.process_task(task, Scheduler::resume_task_immediately_cl); - return None; - } - None => { - rtdebug!("no work was found, returning the scheduler struct"); - return Some(self); - } - } - } - - // Workstealing: In this iteration of the runtime each scheduler - // thread has a distinct work queue. When no work is available - // locally, make a few attempts to steal work from the queues of - // other scheduler threads. 
If a few steals fail we end up in the - // old "no work" path which is fine. - - // First step in the process is to find a task. This function does - // that by first checking the local queue, and if there is no work - // there, trying to steal from the remote work queues. - fn find_work(&mut self) -> Option<~Task> { - rtdebug!("scheduler looking for work"); - if !self.steal_for_yield { - match self.work_queue.pop() { - Some(task) => { - rtdebug!("found a task locally"); - return Some(task) - } - None => { - rtdebug!("scheduler trying to steal"); - return self.try_steals(); - } - } - } else { - // During execution of the last task, it performed a 'yield', - // so we're doing some work stealing in order to introduce some - // scheduling randomness. Otherwise we would just end up popping - // that same task again. This is pretty lame and is to work around - // the problem that work stealing is not designed for 'non-strict' - // (non-fork-join) task parallelism. - self.steal_for_yield = false; - match self.try_steals() { - Some(task) => { - rtdebug!("stole a task after yielding"); - return Some(task); - } - None => { - rtdebug!("did not steal a task after yielding"); - // Back to business - return self.find_work(); - } - } - } - } - - // Try stealing from all queues the scheduler knows about. This - // naive implementation can steal from our own queue or from other - // special schedulers. - fn try_steals(&mut self) -> Option<~Task> { - let work_queues = &mut self.work_queues; - let len = work_queues.len(); - let start_index = self.rng.gen_range(0, len); - for index in range(0, len).map(|i| (i + start_index) % len) { - match work_queues[index].steal() { - deque::Data(task) => { - rtdebug!("found task by stealing"); - return Some(task) - } - _ => () - } - }; - rtdebug!("giving up on stealing"); - return None; - } - - // * Task Routing Functions - Make sure tasks send up in the right - // place. 
- - fn process_task(mut ~self, mut task: ~Task, schedule_fn: SchedulingFn) { - rtdebug!("processing a task"); - - let home = task.take_unwrap_home(); - match home { - Sched(home_handle) => { - if home_handle.sched_id != self.sched_id() { - rtdebug!("sending task home"); - task.give_home(Sched(home_handle)); - Scheduler::send_task_home(task); - Local::put(self); - } else { - rtdebug!("running task here"); - task.give_home(Sched(home_handle)); - schedule_fn(self, task); - } - } - AnySched if self.run_anything => { - rtdebug!("running anysched task here"); - task.give_home(AnySched); - schedule_fn(self, task); - } - AnySched => { - rtdebug!("sending task to friend"); - task.give_home(AnySched); - self.send_to_friend(task); - Local::put(self); - } - } - } - - fn send_task_home(task: ~Task) { - let mut task = task; - let mut home = task.take_unwrap_home(); - match home { - Sched(ref mut home_handle) => { - home_handle.send(PinnedTask(task)); - } - AnySched => { - rtabort!("error: cannot send anysched task home"); - } - } - } - - /// Take a non-homed task we aren't allowed to run here and send - /// it to the designated friend scheduler to execute. - fn send_to_friend(&mut self, task: ~Task) { - rtdebug!("sending a task to friend"); - match self.friend_handle { - Some(ref mut handle) => { - handle.send(TaskFromFriend(task)); - } - None => { - rtabort!("tried to send task to a friend but scheduler has no friends"); - } - } - } - - /// Schedule a task to be executed later. - /// - /// Pushes the task onto the work stealing queue and tells the - /// event loop to run it later. Always use this instead of pushing - /// to the work queue directly. - pub fn enqueue_task(&mut self, task: ~Task) { - - // We push the task onto our local queue clone. - self.work_queue.push(task); - self.idle_callback.get_mut_ref().resume(); - - // We've made work available. Notify a - // sleeping scheduler. 
- - match self.sleeper_list.casual_pop() { - Some(handle) => { - let mut handle = handle; - handle.send(Wake) - } - None => { (/* pass */) } - }; - } - - /// As enqueue_task, but with the possibility for the blocked task to - /// already have been killed. - pub fn enqueue_blocked_task(&mut self, blocked_task: BlockedTask) { - blocked_task.wake().map(|task| self.enqueue_task(task)); - } - - // * Core Context Switching Functions - - // The primary function for changing contexts. In the current - // design the scheduler is just a slightly modified GreenTask, so - // all context swaps are from Task to Task. The only difference - // between the various cases is where the inputs come from, and - // what is done with the resulting task. That is specified by the - // cleanup function f, which takes the scheduler and the - // old task as inputs. - - pub fn change_task_context(mut ~self, - next_task: ~Task, - f: |&mut Scheduler, ~Task|) { - // The current task is grabbed from TLS, not taken as an input. - // Doing an unsafe_take to avoid writing back a null pointer - - // We're going to call `put` later to do that. - let current_task: ~Task = unsafe { Local::unsafe_take() }; - - // Check that the task is not in an atomically() section (e.g., - // holding a pthread mutex, which could deadlock the scheduler). - current_task.death.assert_may_sleep(); - - // These transmutes do something fishy with a closure. - let f_fake_region = unsafe { - transmute::<|&mut Scheduler, ~Task|, - |&mut Scheduler, ~Task|>(f) - }; - let f_opaque = ClosureConverter::from_fn(f_fake_region); - - // The current task is placed inside an enum with the cleanup - // function. This enum is then placed inside the scheduler. - self.cleanup_job = Some(CleanupJob::new(current_task, f_opaque)); - - // The scheduler is then placed inside the next task. - let mut next_task = next_task; - next_task.sched = Some(self); - - // However we still need an internal mutable pointer to the - // original task. 
The strategy here was "arrange memory, then - // get pointers", so we crawl back up the chain using - // transmute to eliminate borrowck errors. - unsafe { - - let sched: &mut Scheduler = - transmute_mut_region(*next_task.sched.get_mut_ref()); - - let current_task: &mut Task = match sched.cleanup_job { - Some(CleanupJob { task: ref task, .. }) => { - let task_ptr: *~Task = task; - transmute_mut_region(*transmute_mut_unsafe(task_ptr)) - } - None => { - rtabort!("no cleanup job"); - } - }; - - let (current_task_context, next_task_context) = - Scheduler::get_contexts(current_task, next_task); - - // Done with everything - put the next task in TLS. This - // works because due to transmute the borrow checker - // believes that we have no internal pointers to - // next_task. - Local::put(next_task); - - // The raw context swap operation. The next action taken - // will be running the cleanup job from the context of the - // next task. - Context::swap(current_task_context, next_task_context); - } - - // When the context swaps back to this task we immediately - // run the cleanup job, as expected by the previously called - // swap_contexts function. - unsafe { - let task: *mut Task = Local::unsafe_borrow(); - (*task).sched.get_mut_ref().run_cleanup_job(); - - // See the comments in switch_running_tasks_and_then for why a lock - // is acquired here. This is the resumption points and the "bounce" - // that it is referring to. - (*task).nasty_deschedule_lock.lock(); - (*task).nasty_deschedule_lock.unlock(); - } - } - - // Returns a mutable reference to both contexts involved in this - // swap. This is unsafe - we are getting mutable internal - // references to keep even when we don't own the tasks. It looks - // kinda safe because we are doing transmutes before passing in - // the arguments. 
- pub fn get_contexts<'a>(current_task: &mut Task, next_task: &mut Task) -> - (&'a mut Context, &'a mut Context) { - let current_task_context = - &mut current_task.coroutine.get_mut_ref().saved_context; - let next_task_context = - &mut next_task.coroutine.get_mut_ref().saved_context; - unsafe { - (transmute_mut_region(current_task_context), - transmute_mut_region(next_task_context)) - } - } - - // * Context Swapping Helpers - Here be ugliness! - - pub fn resume_task_immediately(~self, task: ~Task) { - self.change_task_context(task, |sched, stask| { - sched.sched_task = Some(stask); - }) - } - - fn resume_task_immediately_cl(sched: ~Scheduler, - task: ~Task) { - sched.resume_task_immediately(task) - } - - - pub fn resume_blocked_task_immediately(~self, blocked_task: BlockedTask) { - match blocked_task.wake() { - Some(task) => { self.resume_task_immediately(task); } - None => Local::put(self) - }; - } - - /// Block a running task, context switch to the scheduler, then pass the - /// blocked task to a closure. - /// - /// # Safety note - /// - /// The closure here is a *stack* closure that lives in the - /// running task. It gets transmuted to the scheduler's lifetime - /// and called while the task is blocked. - /// - /// This passes a Scheduler pointer to the fn after the context switch - /// in order to prevent that fn from performing further scheduling operations. - /// Doing further scheduling could easily result in infinite recursion. - /// - /// Note that if the closure provided relinquishes ownership of the - /// BlockedTask, then it is possible for the task to resume execution before - /// the closure has finished executing. This would naturally introduce a - /// race if the closure and task shared portions of the environment. - /// - /// This situation is currently prevented, or in other words it is - /// guaranteed that this function will not return before the given closure - /// has returned. 
- pub fn deschedule_running_task_and_then(mut ~self, - f: |&mut Scheduler, BlockedTask|) { - // Trickier - we need to get the scheduler task out of self - // and use it as the destination. - let stask = self.sched_task.take_unwrap(); - // Otherwise this is the same as below. - self.switch_running_tasks_and_then(stask, f); - } - - pub fn switch_running_tasks_and_then(~self, next_task: ~Task, - f: |&mut Scheduler, BlockedTask|) { - // And here comes one of the sad moments in which a lock is used in a - // core portion of the rust runtime. As always, this is highly - // undesirable, so there's a good reason behind it. - // - // There is an excellent outline of the problem in issue #8132, and it's - // summarized in that `f` is executed on a sched task, but its - // environment is on the previous task. If `f` relinquishes ownership of - // the BlockedTask, then it may introduce a race where `f` is using the - // environment as well as the code after the 'deschedule' block. - // - // The solution we have chosen to adopt for now is to acquire a - // task-local lock around this block. The resumption of the task in - // context switching will bounce on the lock, thereby waiting for this - // block to finish, eliminating the race mentioned above. - // - // To actually maintain a handle to the lock, we use an unsafe pointer - // to it, but we're guaranteed that the task won't exit until we've - // unlocked the lock so there's no worry of this memory going away. 
- self.change_task_context(next_task, |sched, mut task| { - let lock: *mut Mutex = &mut task.nasty_deschedule_lock; - unsafe { (*lock).lock() } - f(sched, BlockedTask::block(task)); - unsafe { (*lock).unlock() } - }) - } - - fn switch_task(sched: ~Scheduler, task: ~Task) { - sched.switch_running_tasks_and_then(task, |sched, last_task| { - sched.enqueue_blocked_task(last_task); - }); - } - - // * Task Context Helpers - - /// Called by a running task to end execution, after which it will - /// be recycled by the scheduler for reuse in a new task. - pub fn terminate_current_task(mut ~self) { - // Similar to deschedule running task and then, but cannot go through - // the task-blocking path. The task is already dying. - let stask = self.sched_task.take_unwrap(); - self.change_task_context(stask, |sched, mut dead_task| { - let coroutine = dead_task.coroutine.take_unwrap(); - coroutine.recycle(&mut sched.stack_pool); - }) - } - - pub fn run_task(task: ~Task) { - let sched: ~Scheduler = Local::take(); - sched.process_task(task, Scheduler::switch_task); - } - - pub fn run_task_later(next_task: ~Task) { - let mut sched = Local::borrow(None::); - sched.get().enqueue_task(next_task); - } - - /// Yield control to the scheduler, executing another task. This is guaranteed - /// to introduce some amount of randomness to the scheduler. Currently the - /// randomness is a result of performing a round of work stealing (which - /// may end up stealing from the current scheduler). - pub fn yield_now(mut ~self) { - self.yield_check_count = reset_yield_check(&mut self.rng); - // Tell the scheduler to start stealing on the next iteration - self.steal_for_yield = true; - self.deschedule_running_task_and_then(|sched, task| { - sched.enqueue_blocked_task(task); - }) - } - - pub fn maybe_yield(mut ~self) { - // The number of times to do the yield check before yielding, chosen arbitrarily. 
- rtassert!(self.yield_check_count > 0); - self.yield_check_count -= 1; - if self.yield_check_count == 0 { - self.yield_now(); - } else { - Local::put(self); - } - } - - - // * Utility Functions - - pub fn sched_id(&self) -> uint { to_uint(self) } - - pub fn run_cleanup_job(&mut self) { - let cleanup_job = self.cleanup_job.take_unwrap(); - cleanup_job.run(self); - } - - pub fn make_handle(&mut self) -> SchedHandle { - let remote = self.event_loop.remote_callback(~SchedRunner as ~Callback); - - return SchedHandle { - remote: remote, - queue: self.message_producer.clone(), - sched_id: self.sched_id() - }; - } -} - -// Supporting types - -type SchedulingFn = extern "Rust" fn (~Scheduler, ~Task); - -pub enum SchedMessage { - Wake, - Shutdown, - PinnedTask(~Task), - TaskFromFriend(~Task), - RunOnce(~Task), -} - -pub struct SchedHandle { - priv remote: ~RemoteCallback, - priv queue: mpsc::Producer, - sched_id: uint -} - -impl SchedHandle { - pub fn send(&mut self, msg: SchedMessage) { - self.queue.push(msg); - self.remote.fire(); - } -} - -struct SchedRunner; - -impl Callback for SchedRunner { - fn call(&mut self) { - Scheduler::run_sched_once(); - } -} - -struct CleanupJob { - task: ~Task, - f: UnsafeTaskReceiver -} - -impl CleanupJob { - pub fn new(task: ~Task, f: UnsafeTaskReceiver) -> CleanupJob { - CleanupJob { - task: task, - f: f - } - } - - pub fn run(self, sched: &mut Scheduler) { - let CleanupJob { task: task, f: f } = self; - f.to_fn()(sched, task) - } -} - -// XXX: Some hacks to put a || closure in Scheduler without borrowck -// complaining -type UnsafeTaskReceiver = raw::Closure; -trait ClosureConverter { - fn from_fn(|&mut Scheduler, ~Task|) -> Self; - fn to_fn(self) -> |&mut Scheduler, ~Task|; -} -impl ClosureConverter for UnsafeTaskReceiver { - fn from_fn(f: |&mut Scheduler, ~Task|) -> UnsafeTaskReceiver { - unsafe { transmute(f) } - } - fn to_fn(self) -> |&mut Scheduler, ~Task| { unsafe { transmute(self) } } -} - -// On unix, we read randomness straight 
from /dev/urandom, but the -// default constructor of an XorShiftRng does this via io::fs, which -// relies on the scheduler existing, so we have to manually load -// randomness. Windows has its own C API for this, so we don't need to -// worry there. -#[cfg(windows)] -fn new_sched_rng() -> XorShiftRng { - XorShiftRng::new() -} -#[cfg(unix)] -fn new_sched_rng() -> XorShiftRng { - use libc; - use mem; - use c_str::ToCStr; - use vec::MutableVector; - use iter::Iterator; - use rand::SeedableRng; - - let fd = "/dev/urandom".with_c_str(|name| { - unsafe { libc::open(name, libc::O_RDONLY, 0) } - }); - if fd == -1 { - rtabort!("could not open /dev/urandom for reading.") - } - - let mut seeds = [0u32, .. 4]; - let size = mem::size_of_val(&seeds); - loop { - let nbytes = unsafe { - libc::read(fd, - seeds.as_mut_ptr() as *mut libc::c_void, - size as libc::size_t) - }; - rtassert!(nbytes as uint == size); - - if !seeds.iter().all(|x| *x == 0) { - break; - } - } - - unsafe {libc::close(fd);} - - SeedableRng::from_seed(seeds) -} - -#[cfg(test)] -mod test { - use prelude::*; - - use borrow::to_uint; - use rt::deque::BufferPool; - use rt::basic; - use rt::sched::{Scheduler}; - use rt::task::{Task, Sched}; - use rt::test::*; - use rt::thread::Thread; - use rt::util; - use task::TaskResult; - use unstable::run_in_bare_thread; - - #[test] - fn trivial_run_in_newsched_task_test() { - let mut task_ran = false; - let task_ran_ptr: *mut bool = &mut task_ran; - do run_in_newsched_task || { - unsafe { *task_ran_ptr = true }; - rtdebug!("executed from the new scheduler") - } - assert!(task_ran); - } - - #[test] - fn multiple_task_test() { - let total = 10; - let mut task_run_count = 0; - let task_run_count_ptr: *mut uint = &mut task_run_count; - do run_in_newsched_task || { - for _ in range(0u, total) { - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1}; - } - } - } - assert!(task_run_count == total); - } - - #[test] - fn multiple_task_nested_test() { - let mut 
task_run_count = 0; - let task_run_count_ptr: *mut uint = &mut task_run_count; - do run_in_newsched_task || { - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - do spawntask || { - unsafe { *task_run_count_ptr = *task_run_count_ptr + 1 }; - } - } - } - } - assert!(task_run_count == 3); - } - - // Confirm that a sched_id actually is the uint form of the - // pointer to the scheduler struct. - #[test] - fn simple_sched_id_test() { - do run_in_bare_thread { - let sched = ~new_test_uv_sched(); - assert!(to_uint(sched) == sched.sched_id()); - } - } - - // Compare two scheduler ids that are different, this should never - // fail but may catch a mistake someday. - #[test] - fn compare_sched_id_test() { - do run_in_bare_thread { - let sched_one = ~new_test_uv_sched(); - let sched_two = ~new_test_uv_sched(); - assert!(sched_one.sched_id() != sched_two.sched_id()); - } - } - - - // A very simple test that confirms that a task executing on the - // home scheduler notices that it is home. - #[test] - fn test_home_sched() { - do run_in_bare_thread { - let mut task_ran = false; - let task_ran_ptr: *mut bool = &mut task_ran; - - let mut sched = ~new_test_uv_sched(); - let sched_handle = sched.make_handle(); - - let mut task = ~do Task::new_root_homed(&mut sched.stack_pool, None, - Sched(sched_handle)) { - unsafe { *task_ran_ptr = true }; - assert!(Task::on_appropriate_sched()); - }; - - let on_exit: proc(TaskResult) = proc(exit_status) { - rtassert!(exit_status.is_ok()) - }; - task.death.on_exit = Some(on_exit); - - sched.bootstrap(task); - } - } - - // An advanced test that checks all four possible states that a - // (task,sched) can be in regarding homes. 
- - #[test] - fn test_schedule_home_states() { - use rt::sleeper_list::SleeperList; - use rt::sched::Shutdown; - use borrow; - - do run_in_bare_thread { - - let sleepers = SleeperList::new(); - let mut pool = BufferPool::new(); - let (normal_worker, normal_stealer) = pool.deque(); - let (special_worker, special_stealer) = pool.deque(); - let queues = ~[normal_stealer, special_stealer]; - - // Our normal scheduler - let mut normal_sched = ~Scheduler::new( - basic::event_loop(), - normal_worker, - queues.clone(), - sleepers.clone()); - - let normal_handle = normal_sched.make_handle(); - - let friend_handle = normal_sched.make_handle(); - - // Our special scheduler - let mut special_sched = ~Scheduler::new_special( - basic::event_loop(), - special_worker, - queues.clone(), - sleepers.clone(), - false, - Some(friend_handle)); - - let special_handle = special_sched.make_handle(); - - let t1_handle = special_sched.make_handle(); - let t4_handle = special_sched.make_handle(); - - // Four test tasks: - // 1) task is home on special - // 2) task not homed, sched doesn't care - // 3) task not homed, sched requeues - // 4) task not home, send home - - let task1 = ~do Task::new_root_homed(&mut special_sched.stack_pool, None, - Sched(t1_handle)) || { - rtassert!(Task::on_appropriate_sched()); - }; - rtdebug!("task1 id: **{}**", borrow::to_uint(task1)); - - let task2 = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtassert!(Task::on_appropriate_sched()); - }; - - let task3 = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtassert!(Task::on_appropriate_sched()); - }; - - let task4 = ~do Task::new_root_homed(&mut special_sched.stack_pool, None, - Sched(t4_handle)) { - rtassert!(Task::on_appropriate_sched()); - }; - rtdebug!("task4 id: **{}**", borrow::to_uint(task4)); - - // Signal from the special task that we are done. 
- let (port, chan) = Chan::<()>::new(); - - let normal_task = ~do Task::new_root(&mut normal_sched.stack_pool, None) { - rtdebug!("*about to submit task2*"); - Scheduler::run_task(task2); - rtdebug!("*about to submit task4*"); - Scheduler::run_task(task4); - rtdebug!("*normal_task done*"); - port.recv(); - let mut nh = normal_handle; - nh.send(Shutdown); - let mut sh = special_handle; - sh.send(Shutdown); - }; - - rtdebug!("normal task: {}", borrow::to_uint(normal_task)); - - let special_task = ~do Task::new_root(&mut special_sched.stack_pool, None) { - rtdebug!("*about to submit task1*"); - Scheduler::run_task(task1); - rtdebug!("*about to submit task3*"); - Scheduler::run_task(task3); - rtdebug!("*done with special_task*"); - chan.send(()); - }; - - rtdebug!("special task: {}", borrow::to_uint(special_task)); - - let normal_sched = normal_sched; - let normal_thread = do Thread::start { - normal_sched.bootstrap(normal_task); - rtdebug!("finished with normal_thread"); - }; - - let special_sched = special_sched; - let special_thread = do Thread::start { - special_sched.bootstrap(special_task); - rtdebug!("finished with special_sched"); - }; - - normal_thread.join(); - special_thread.join(); - } - } - - #[test] - fn test_stress_schedule_task_states() { - if util::limit_thread_creation_due_to_osx_and_valgrind() { return; } - let n = stress_factor() * 120; - for _ in range(0, n as int) { - test_schedule_home_states(); - } - } - - #[test] - fn test_io_callback() { - use io::timer; - - // This is a regression test that when there are no schedulable tasks - // in the work queue, but we are performing I/O, that once we do put - // something in the work queue again the scheduler picks it up and doesn't - // exit before emptying the work queue - do run_in_uv_task { - do spawntask { - timer::sleep(10); - } - } - } - - #[test] - fn handle() { - do run_in_bare_thread { - let (port, chan) = Chan::new(); - - let thread_one = do Thread::start { - let chan = chan; - do 
run_in_newsched_task_core { - chan.send(()); - } - }; - - let thread_two = do Thread::start { - let port = port; - do run_in_newsched_task_core { - port.recv(); - } - }; - - thread_two.join(); - thread_one.join(); - } - } - - // A regression test that the final message is always handled. - // Used to deadlock because Shutdown was never recvd. - #[test] - fn no_missed_messages() { - use rt::sleeper_list::SleeperList; - use rt::stack::StackPool; - use rt::sched::{Shutdown, TaskFromFriend}; - - do run_in_bare_thread { - stress_factor().times(|| { - let sleepers = SleeperList::new(); - let mut pool = BufferPool::new(); - let (worker, stealer) = pool.deque(); - - let mut sched = ~Scheduler::new( - basic::event_loop(), - worker, - ~[stealer], - sleepers.clone()); - - let mut handle = sched.make_handle(); - - let sched = sched; - let thread = do Thread::start { - let mut sched = sched; - let bootstrap_task = - ~Task::new_root(&mut sched.stack_pool, - None, - proc()()); - sched.bootstrap(bootstrap_task); - }; - - let mut stack_pool = StackPool::new(); - let task = ~Task::new_root(&mut stack_pool, None, proc()()); - handle.send(TaskFromFriend(task)); - - handle.send(Shutdown); - drop(handle); - - thread.join(); - }) - } - } - - #[test] - fn multithreading() { - use num::Times; - use vec::OwnedVector; - use container::Container; - - do run_in_mt_newsched_task { - let mut ports = ~[]; - 10.times(|| { - let (port, chan) = Chan::new(); - do spawntask_later { - chan.send(()); - } - ports.push(port); - }); - - while !ports.is_empty() { - ports.pop().recv(); - } - } - } - - #[test] - fn thread_ring() { - do run_in_mt_newsched_task { - let (end_port, end_chan) = Chan::new(); - - let n_tasks = 10; - let token = 2000; - - let (mut p, ch1) = Chan::new(); - ch1.send((token, end_chan)); - let mut i = 2; - while i <= n_tasks { - let (next_p, ch) = Chan::new(); - let imm_i = i; - let imm_p = p; - do spawntask_random { - roundtrip(imm_i, n_tasks, &imm_p, &ch); - }; - p = next_p; - i += 1; 
- } - let p = p; - do spawntask_random { - roundtrip(1, n_tasks, &p, &ch1); - } - - end_port.recv(); - } - - fn roundtrip(id: int, n_tasks: int, - p: &Port<(int, Chan<()>)>, - ch: &Chan<(int, Chan<()>)>) { - while (true) { - match p.recv() { - (1, end_chan) => { - debug!("{}\n", id); - end_chan.send(()); - return; - } - (token, end_chan) => { - debug!("thread: {} got token: {}", id, token); - ch.send((token - 1, end_chan)); - if token <= n_tasks { - return; - } - } - } - } - } - } - - #[test] - fn start_closure_dtor() { - use ops::Drop; - - // Regression test that the `start` task entrypoint can - // contain dtors that use task resources - do run_in_newsched_task { - struct S { field: () } - - impl Drop for S { - fn drop(&mut self) { - let _foo = @0; - } - } - - let s = S { field: () }; - - do spawntask { - let _ss = &s; - } - } - } - - // FIXME: #9407: xfail-test - #[ignore] - #[test] - fn dont_starve_1() { - stress_factor().times(|| { - do run_in_mt_newsched_task { - let (port, chan) = Chan::new(); - - // This task should not be able to starve the sender; - // The sender should get stolen to another thread. - do spawntask { - while port.try_recv().is_none() { } - } - - chan.send(()); - } - }) - } - - #[test] - fn dont_starve_2() { - stress_factor().times(|| { - do run_in_newsched_task { - let (port, chan) = Chan::new(); - let (_port2, chan2) = Chan::new(); - - // This task should not be able to starve the other task. - // The sends should eventually yield. - do spawntask { - while port.try_recv().is_none() { - chan2.send(()); - } - } - - chan.send(()); - } - }) - } - - // Regression test for a logic bug that would cause single-threaded schedulers - // to sleep forever after yielding and stealing another task. 
- #[test] - fn single_threaded_yield() { - use task::{spawn, spawn_sched, SingleThreaded, deschedule}; - use num::Times; - - do spawn_sched(SingleThreaded) { - 5.times(|| { deschedule(); }) - } - do spawn { } - do spawn { } - } -} diff --git a/src/libstd/rt/sleeper_list.rs b/src/libstd/rt/sleeper_list.rs deleted file mode 100644 index 39c7431837f..00000000000 --- a/src/libstd/rt/sleeper_list.rs +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! Maintains a shared list of sleeping schedulers. Schedulers -//! use this to wake each other up. - -use rt::sched::SchedHandle; -use rt::mpmc_bounded_queue::Queue; -use option::*; -use clone::Clone; - -pub struct SleeperList { - priv q: Queue, -} - -impl SleeperList { - pub fn new() -> SleeperList { - SleeperList{q: Queue::with_capacity(8*1024)} - } - - pub fn push(&mut self, value: SchedHandle) { - assert!(self.q.push(value)) - } - - pub fn pop(&mut self) -> Option { - self.q.pop() - } - - pub fn casual_pop(&mut self) -> Option { - self.q.pop() - } -} - -impl Clone for SleeperList { - fn clone(&self) -> SleeperList { - SleeperList { - q: self.q.clone() - } - } -} diff --git a/src/libstd/rt/stack.rs b/src/libstd/rt/stack.rs deleted file mode 100644 index 44b60e955d2..00000000000 --- a/src/libstd/rt/stack.rs +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -use container::Container; -use ptr::RawPtr; -use vec; -use ops::Drop; -use libc::{c_uint, uintptr_t}; - -pub struct StackSegment { - priv buf: ~[u8], - priv valgrind_id: c_uint -} - -impl StackSegment { - pub fn new(size: uint) -> StackSegment { - unsafe { - // Crate a block of uninitialized values - let mut stack = vec::with_capacity(size); - stack.set_len(size); - - let mut stk = StackSegment { - buf: stack, - valgrind_id: 0 - }; - - // XXX: Using the FFI to call a C macro. Slow - stk.valgrind_id = rust_valgrind_stack_register(stk.start(), stk.end()); - return stk; - } - } - - /// Point to the low end of the allocated stack - pub fn start(&self) -> *uint { - self.buf.as_ptr() as *uint - } - - /// Point one word beyond the high end of the allocated stack - pub fn end(&self) -> *uint { - unsafe { - self.buf.as_ptr().offset(self.buf.len() as int) as *uint - } - } -} - -impl Drop for StackSegment { - fn drop(&mut self) { - unsafe { - // XXX: Using the FFI to call a C macro. Slow - rust_valgrind_stack_deregister(self.valgrind_id); - } - } -} - -pub struct StackPool(()); - -impl StackPool { - pub fn new() -> StackPool { StackPool(()) } - - fn take_segment(&self, min_size: uint) -> StackSegment { - StackSegment::new(min_size) - } - - fn give_segment(&self, _stack: StackSegment) { - } -} - -extern { - fn rust_valgrind_stack_register(start: *uintptr_t, end: *uintptr_t) -> c_uint; - fn rust_valgrind_stack_deregister(id: c_uint); -} diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 30e05e9091f..7602d7b0564 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -13,29 +13,31 @@ //! local storage, and logging. Even a 'freestanding' Rust would likely want //! to implement this. 
-use super::local_heap::LocalHeap; - -use prelude::*; - +use any::AnyOwnExt; use borrow; use cleanup; use io::Writer; use libc::{c_char, size_t}; use local_data; +use ops::Drop; use option::{Option, Some, None}; +use prelude::drop; +use result::{Result, Ok, Err}; +use rt::Runtime; use rt::borrowck::BorrowRecord; use rt::borrowck; -use rt::context::Context; -use rt::env; -use rt::kill::Death; use rt::local::Local; +use rt::local_heap::LocalHeap; use rt::logging::StdErrLogger; -use rt::sched::{Scheduler, SchedHandle}; -use rt::stack::{StackSegment, StackPool}; +use rt::rtio::LocalIo; use rt::unwind::Unwinder; use send_str::SendStr; +use sync::arc::UnsafeArc; +use sync::atomics::{AtomicUint, SeqCst}; +use task::{TaskResult, TaskOpts}; use unstable::finally::Finally; -use unstable::mutex::Mutex; + +#[cfg(stage0)] pub use rt::unwind::begin_unwind; // The Task struct represents all state associated with a rust // task. There are at this point two primary "subtypes" of task, @@ -45,201 +47,89 @@ use unstable::mutex::Mutex; pub struct Task { heap: LocalHeap, - priv gc: GarbageCollector, + gc: GarbageCollector, storage: LocalStorage, - logger: Option, unwinder: Unwinder, death: Death, destroyed: bool, name: Option, - coroutine: Option, - sched: Option<~Scheduler>, - task_type: TaskType, // Dynamic borrowck debugging info borrow_list: Option<~[BorrowRecord]>, + + logger: Option, stdout_handle: Option<~Writer>, - // See the comments in the scheduler about why this is necessary - nasty_deschedule_lock: Mutex, + priv imp: Option<~Runtime>, } -pub enum TaskType { - GreenTask(Option), - SchedTask -} +pub struct GarbageCollector; +pub struct LocalStorage(Option); -/// A coroutine is nothing more than a (register context, stack) pair. -pub struct Coroutine { - /// The segment of stack on which the task is currently running or - /// if the task is blocked, on which the task will resume - /// execution. 
- /// - /// Servo needs this to be public in order to tell SpiderMonkey - /// about the stack bounds. - current_stack_segment: StackSegment, - /// Always valid if the task is alive and not running. - saved_context: Context +/// A handle to a blocked task. Usually this means having the ~Task pointer by +/// ownership, but if the task is killable, a killer can steal it at any time. +pub enum BlockedTask { + Owned(~Task), + Shared(UnsafeArc), } -/// Some tasks have a dedicated home scheduler that they must run on. -pub enum SchedHome { - AnySched, - Sched(SchedHandle) +/// Per-task state related to task death, killing, failure, etc. +pub struct Death { + // Action to be done with the exit code. If set, also makes the task wait + // until all its watched children exit before collecting the status. + on_exit: Option, } -pub struct GarbageCollector; -pub struct LocalStorage(Option); +pub struct BlockedTaskIterator { + priv inner: UnsafeArc, +} impl Task { - - // A helper to build a new task using the dynamically found - // scheduler and task. Only works in GreenTask context. 
- pub fn build_homed_child(stack_size: Option, - f: proc(), - home: SchedHome) - -> ~Task { - let mut running_task = Local::borrow(None::); - let mut sched = running_task.get().sched.take_unwrap(); - let new_task = ~running_task.get() - .new_child_homed(&mut sched.stack_pool, - stack_size, - home, - f); - running_task.get().sched = Some(sched); - new_task - } - - pub fn build_child(stack_size: Option, f: proc()) -> ~Task { - Task::build_homed_child(stack_size, f, AnySched) - } - - pub fn build_homed_root(stack_size: Option, - f: proc(), - home: SchedHome) - -> ~Task { - let mut running_task = Local::borrow(None::); - let mut sched = running_task.get().sched.take_unwrap(); - let new_task = ~Task::new_root_homed(&mut sched.stack_pool, - stack_size, - home, - f); - running_task.get().sched = Some(sched); - new_task - } - - pub fn build_root(stack_size: Option, f: proc()) -> ~Task { - Task::build_homed_root(stack_size, f, AnySched) - } - - pub fn new_sched_task() -> Task { - Task { - heap: LocalHeap::new(), - gc: GarbageCollector, - storage: LocalStorage(None), - logger: None, - unwinder: Unwinder { unwinding: false, cause: None }, - death: Death::new(), - destroyed: false, - coroutine: Some(Coroutine::empty()), - name: None, - sched: None, - task_type: SchedTask, - borrow_list: None, - stdout_handle: None, - nasty_deschedule_lock: unsafe { Mutex::new() }, - } - } - - pub fn new_root(stack_pool: &mut StackPool, - stack_size: Option, - start: proc()) -> Task { - Task::new_root_homed(stack_pool, stack_size, AnySched, start) - } - - pub fn new_child(&mut self, - stack_pool: &mut StackPool, - stack_size: Option, - start: proc()) -> Task { - self.new_child_homed(stack_pool, stack_size, AnySched, start) - } - - pub fn new_root_homed(stack_pool: &mut StackPool, - stack_size: Option, - home: SchedHome, - start: proc()) -> Task { + pub fn new() -> Task { Task { heap: LocalHeap::new(), gc: GarbageCollector, storage: LocalStorage(None), - logger: None, - unwinder: Unwinder { 
unwinding: false, cause: None }, + unwinder: Unwinder::new(), death: Death::new(), destroyed: false, name: None, - coroutine: Some(Coroutine::new(stack_pool, stack_size, start)), - sched: None, - task_type: GreenTask(Some(home)), borrow_list: None, - stdout_handle: None, - nasty_deschedule_lock: unsafe { Mutex::new() }, - } - } - - pub fn new_child_homed(&mut self, - stack_pool: &mut StackPool, - stack_size: Option, - home: SchedHome, - start: proc()) -> Task { - Task { - heap: LocalHeap::new(), - gc: GarbageCollector, - storage: LocalStorage(None), logger: None, - unwinder: Unwinder { unwinding: false, cause: None }, - death: Death::new(), - destroyed: false, - name: None, - coroutine: Some(Coroutine::new(stack_pool, stack_size, start)), - sched: None, - task_type: GreenTask(Some(home)), - borrow_list: None, stdout_handle: None, - nasty_deschedule_lock: unsafe { Mutex::new() }, + imp: None, } } - pub fn give_home(&mut self, new_home: SchedHome) { - match self.task_type { - GreenTask(ref mut home) => { - *home = Some(new_home); - } - SchedTask => { - rtabort!("type error: used SchedTask as GreenTask"); - } - } - } - - pub fn take_unwrap_home(&mut self) -> SchedHome { - match self.task_type { - GreenTask(ref mut home) => { - let out = home.take_unwrap(); - return out; - } - SchedTask => { - rtabort!("type error: used SchedTask as GreenTask"); - } - } - } - - pub fn run(&mut self, f: ||) { - rtdebug!("run called on task: {}", borrow::to_uint(self)); + /// Executes the given closure as if it's running inside this task. The task + /// is consumed upon entry, and the destroyed task is returned from this + /// function in order for the caller to free. This function is guaranteed to + /// not unwind because the closure specified is run inside of a `rust_try` + /// block. (this is the only try/catch block in the world). + /// + /// This function is *not* meant to be abused as a "try/catch" block. 
This + /// is meant to be used at the absolute boundaries of a task's lifetime, and + /// only for that purpose. + pub fn run(~self, f: ||) -> ~Task { + // Need to put ourselves into TLS, but also need access to the unwinder. + // Unsafely get a handle to the task so we can continue to use it after + // putting it in tls (so we can invoke the unwinder). + let handle: *mut Task = unsafe { + *cast::transmute::<&~Task, &*mut Task>(&self) + }; + Local::put(self); // The only try/catch block in the world. Attempt to run the task's // client-specified code and catch any failures. - self.unwinder.try(|| { + let try_block = || { // Run the task main function, then do some cleanup. f.finally(|| { + fn flush(w: Option<~Writer>) { + match w { + Some(mut w) => { w.flush(); } + None => {} + } + } // First, destroy task-local storage. This may run user dtors. // @@ -260,7 +150,10 @@ impl Task { // TLS, or possibly some destructors for those objects being // annihilated invoke TLS. Sadly these two operations seemed to // be intertwined, and miraculously work for now... - self.storage.take(); + let mut task = Local::borrow(None::); + let storage = task.get().storage.take(); + drop(task); + drop(storage); // Destroy remaining boxes. Also may run user dtors. unsafe { cleanup::annihilate(); } @@ -268,77 +161,112 @@ impl Task { // Finally flush and destroy any output handles which the task // owns. There are no boxes here, and no user destructors should // run after this any more. 
- match self.stdout_handle.take() { - Some(handle) => { - let mut handle = handle; - handle.flush(); - } - None => {} - } - self.logger.take(); + let mut task = Local::borrow(None::); + let stdout = task.get().stdout_handle.take(); + let logger = task.get().logger.take(); + drop(task); + + flush(stdout); + drop(logger); }) - }); + }; + + unsafe { (*handle).unwinder.try(try_block); } // Cleanup the dynamic borrowck debugging info borrowck::clear_task_borrow_list(); - self.death.collect_failure(self.unwinder.result()); - self.destroyed = true; + let mut me: ~Task = Local::take(); + me.death.collect_failure(me.unwinder.result()); + me.destroyed = true; + return me; } - // New utility functions for homes. + /// Inserts a runtime object into this task, transferring ownership to the + /// task. It is illegal to replace a previous runtime object in this task + /// with this argument. + pub fn put_runtime(&mut self, ops: ~Runtime) { + assert!(self.imp.is_none()); + self.imp = Some(ops); + } - pub fn is_home_no_tls(&self, sched: &~Scheduler) -> bool { - match self.task_type { - GreenTask(Some(AnySched)) => { false } - GreenTask(Some(Sched(SchedHandle { sched_id: ref id, .. }))) => { - *id == sched.sched_id() - } - GreenTask(None) => { - rtabort!("task without home"); - } - SchedTask => { - // Awe yea - rtabort!("type error: expected: GreenTask, found: SchedTask"); + /// Attempts to extract the runtime as a specific type. If the runtime does + /// not have the provided type, then the runtime is not removed. If the + /// runtime does have the specified type, then it is removed and returned + /// (transfer of ownership). + /// + /// It is recommended to only use this method when *absolutely necessary*. + /// This function may not be available in the future. + pub fn maybe_take_runtime(&mut self) -> Option<~T> { + // This is a terrible, terrible function. 
The general idea here is to + // take the runtime, cast it to ~Any, check if it has the right type, + // and then re-cast it back if necessary. The method of doing this is + // pretty sketchy and involves shuffling vtables of trait objects + // around, but it gets the job done. + // + // XXX: This function is a serious code smell and should be avoided at + // all costs. I have yet to think of a method to avoid this + // function, and I would be saddened if more usage of the function + // crops up. + unsafe { + let imp = self.imp.take_unwrap(); + let &(vtable, _): &(uint, uint) = cast::transmute(&imp); + match imp.wrap().move::() { + Ok(t) => Some(t), + Err(t) => { + let (_, obj): (uint, uint) = cast::transmute(t); + let obj: ~Runtime = cast::transmute((vtable, obj)); + self.put_runtime(obj); + None + } } } } - pub fn homed(&self) -> bool { - match self.task_type { - GreenTask(Some(AnySched)) => { false } - GreenTask(Some(Sched(SchedHandle { .. }))) => { true } - GreenTask(None) => { - rtabort!("task without home"); - } - SchedTask => { - rtabort!("type error: expected: GreenTask, found: SchedTask"); - } - } + /// Spawns a sibling to this task. The newly spawned task is configured with + /// the `opts` structure and will run `f` as the body of its code. + pub fn spawn_sibling(mut ~self, opts: TaskOpts, f: proc()) { + let ops = self.imp.take_unwrap(); + ops.spawn_sibling(self, opts, f) } - // Grab both the scheduler and the task from TLS and check if the - // task is executing on an appropriate scheduler. 
- pub fn on_appropriate_sched() -> bool {
- let mut task = Local::borrow(None::);
- let sched_id = task.get().sched.get_ref().sched_id();
- let sched_run_anything = task.get().sched.get_ref().run_anything;
- match task.get().task_type {
- GreenTask(Some(AnySched)) => {
- rtdebug!("anysched task in sched check ****");
- sched_run_anything
- }
- GreenTask(Some(Sched(SchedHandle { sched_id: ref id, ..}))) => {
- rtdebug!("homed task in sched check ****");
- *id == sched_id
- }
- GreenTask(None) => {
- rtabort!("task without home");
- }
- SchedTask => {
- rtabort!("type error: expected: GreenTask, found: SchedTask");
- }
- }
+ /// Deschedules the current task, invoking `f` `amt` times. It is not
+ /// recommended to use this function directly, but rather communication
+ /// primitives in `std::comm` should be used.
+ pub fn deschedule(mut ~self, amt: uint,
+ f: |BlockedTask| -> Result<(), BlockedTask>) {
+ let ops = self.imp.take_unwrap();
+ ops.deschedule(amt, self, f)
+ }
+
+ /// Wakes up a previously blocked task, optionally specifying whether the
+ /// current task can accept a change in scheduling. This function can only
+ /// be called on tasks that were previously blocked in `deschedule`.
+ pub fn reawaken(mut ~self, can_resched: bool) {
+ let ops = self.imp.take_unwrap();
+ ops.reawaken(self, can_resched);
+ }
+
+ /// Yields control of this task to another task. This function will
+ /// eventually return, but possibly not immediately. This is used as an
+ /// opportunity to allow other tasks a chance to run.
+ pub fn yield_now(mut ~self) {
+ let ops = self.imp.take_unwrap();
+ ops.yield_now(self);
+ }
+
+ /// Similar to `yield_now`, except that this function may immediately return
+ /// without yielding (depending on what the runtime decides to do).
+ pub fn maybe_yield(mut ~self) {
+ let ops = self.imp.take_unwrap();
+ ops.maybe_yield(self);
+ }
+
+ /// Acquires a handle to the I/O factory that this task contains, normally
+ /// stored in the task's runtime. 
This factory may not always be available, + /// which is why the return type is `Option` + pub fn local_io<'a>(&'a mut self) -> Option> { + self.imp.get_mut_ref().local_io() } } @@ -346,253 +274,101 @@ impl Drop for Task { fn drop(&mut self) { rtdebug!("called drop for a task: {}", borrow::to_uint(self)); rtassert!(self.destroyed); - - unsafe { self.nasty_deschedule_lock.destroy(); } } } -// Coroutines represent nothing more than a context and a stack -// segment. - -impl Coroutine { - - pub fn new(stack_pool: &mut StackPool, - stack_size: Option, - start: proc()) - -> Coroutine { - let stack_size = match stack_size { - Some(size) => size, - None => env::min_stack() - }; - let start = Coroutine::build_start_wrapper(start); - let mut stack = stack_pool.take_segment(stack_size); - let initial_context = Context::new(start, &mut stack); - Coroutine { - current_stack_segment: stack, - saved_context: initial_context - } +impl Iterator for BlockedTaskIterator { + fn next(&mut self) -> Option { + Some(Shared(self.inner.clone())) } +} - pub fn empty() -> Coroutine { - Coroutine { - current_stack_segment: StackSegment::new(0), - saved_context: Context::empty() +impl BlockedTask { + /// Returns Some if the task was successfully woken; None if already killed. + pub fn wake(self) -> Option<~Task> { + match self { + Owned(task) => Some(task), + Shared(arc) => unsafe { + match (*arc.get()).swap(0, SeqCst) { + 0 => None, + n => Some(cast::transmute(n)), + } + } } } - fn build_start_wrapper(start: proc()) -> proc() { - let wrapper: proc() = proc() { - // First code after swap to this new context. Run our - // cleanup job. - unsafe { + // This assertion has two flavours because the wake involves an atomic op. + // In the faster version, destructors will fail dramatically instead. + #[cfg(not(test))] pub fn trash(self) { } + #[cfg(test)] pub fn trash(self) { assert!(self.wake().is_none()); } - // Again - might work while safe, or it might not. 
- { - let mut sched = Local::borrow(None::); - sched.get().run_cleanup_job(); - } + /// Create a blocked task, unless the task was already killed. + pub fn block(task: ~Task) -> BlockedTask { + Owned(task) + } - // To call the run method on a task we need a direct - // reference to it. The task is in TLS, so we can - // simply unsafe_borrow it to get this reference. We - // need to still have the task in TLS though, so we - // need to unsafe_borrow. - let task: *mut Task = Local::unsafe_borrow(); - - let mut start_cell = Some(start); - (*task).run(|| { - // N.B. Removing `start` from the start wrapper - // closure by emptying a cell is critical for - // correctness. The ~Task pointer, and in turn the - // closure used to initialize the first call - // frame, is destroyed in the scheduler context, - // not task context. So any captured closures must - // not contain user-definable dtors that expect to - // be in task context. By moving `start` out of - // the closure, all the user code goes our of - // scope while the task is still running. - let start = start_cell.take_unwrap(); - start(); - }); + /// Converts one blocked task handle to a list of many handles to the same. + pub fn make_selectable(self, num_handles: uint) -> Take + { + let arc = match self { + Owned(task) => { + let flag = unsafe { AtomicUint::new(cast::transmute(task)) }; + UnsafeArc::new(flag) } - - // We remove the sched from the Task in TLS right now. - let sched: ~Scheduler = Local::take(); - // ... allowing us to give it away when performing a - // scheduling operation. - sched.terminate_current_task() + Shared(arc) => arc.clone(), }; - return wrapper; + BlockedTaskIterator{ inner: arc }.take(num_handles) } - /// Destroy coroutine and try to reuse stack segment. - pub fn recycle(self, stack_pool: &mut StackPool) { + /// Convert to an unsafe uint value. Useful for storing in a pipe's state + /// flag. 
+ #[inline] + pub unsafe fn cast_to_uint(self) -> uint { match self { - Coroutine { current_stack_segment, .. } => { - stack_pool.give_segment(current_stack_segment); + Owned(task) => { + let blocked_task_ptr: uint = cast::transmute(task); + rtassert!(blocked_task_ptr & 0x1 == 0); + blocked_task_ptr + } + Shared(arc) => { + let blocked_task_ptr: uint = cast::transmute(~arc); + rtassert!(blocked_task_ptr & 0x1 == 0); + blocked_task_ptr | 0x1 } } } -} - -/// This function is invoked from rust's current __morestack function. Segmented -/// stacks are currently not enabled as segmented stacks, but rather one giant -/// stack segment. This means that whenever we run out of stack, we want to -/// truly consider it to be stack overflow rather than allocating a new stack. -#[no_mangle] // - this is called from C code -#[no_split_stack] // - it would be sad for this function to trigger __morestack -#[doc(hidden)] // - Function must be `pub` to get exported, but it's - // irrelevant for documentation purposes. -#[cfg(not(test))] // in testing, use the original libstd's version -pub extern "C" fn rust_stack_exhausted() { - use rt::context; - use rt::in_green_task_context; - use rt::task::Task; - use rt::local::Local; - use unstable::intrinsics; - - unsafe { - // We're calling this function because the stack just ran out. We need - // to call some other rust functions, but if we invoke the functions - // right now it'll just trigger this handler being called again. In - // order to alleviate this, we move the stack limit to be inside of the - // red zone that was allocated for exactly this reason. - let limit = context::get_sp_limit(); - context::record_sp_limit(limit - context::RED_ZONE / 2); - - // This probably isn't the best course of action. Ideally one would want - // to unwind the stack here instead of just aborting the entire process. - // This is a tricky problem, however. There's a few things which need to - // be considered: - // - // 1. 
We're here because of a stack overflow, yet unwinding will run - // destructors and hence arbitrary code. What if that code overflows - // the stack? One possibility is to use the above allocation of an - // extra 10k to hope that we don't hit the limit, and if we do then - // abort the whole program. Not the best, but kind of hard to deal - // with unless we want to switch stacks. - // - // 2. LLVM will optimize functions based on whether they can unwind or - // not. It will flag functions with 'nounwind' if it believes that - // the function cannot trigger unwinding, but if we do unwind on - // stack overflow then it means that we could unwind in any function - // anywhere. We would have to make sure that LLVM only places the - // nounwind flag on functions which don't call any other functions. - // - // 3. The function that overflowed may have owned arguments. These - // arguments need to have their destructors run, but we haven't even - // begun executing the function yet, so unwinding will not run the - // any landing pads for these functions. If this is ignored, then - // the arguments will just be leaked. - // - // Exactly what to do here is a very delicate topic, and is possibly - // still up in the air for what exactly to do. Some relevant issues: - // - // #3555 - out-of-stack failure leaks arguments - // #3695 - should there be a stack limit? - // #9855 - possible strategies which could be taken - // #9854 - unwinding on windows through __morestack has never worked - // #2361 - possible implementation of not using landing pads - - if in_green_task_context() { - let mut task = Local::borrow(None::); - let n = task.get() - .name - .as_ref() - .map(|n| n.as_slice()) - .unwrap_or(""); - - // See the message below for why this is not emitted to the - // task's logger. 
This has the additional conundrum of the - // logger may not be initialized just yet, meaning that an FFI - // call would happen to initialized it (calling out to libuv), - // and the FFI call needs 2MB of stack when we just ran out. - rterrln!("task '{}' has overflowed its stack", n); + /// Convert from an unsafe uint value. Useful for retrieving a pipe's state + /// flag. + #[inline] + pub unsafe fn cast_from_uint(blocked_task_ptr: uint) -> BlockedTask { + if blocked_task_ptr & 0x1 == 0 { + Owned(cast::transmute(blocked_task_ptr)) } else { - rterrln!("stack overflow in non-task context"); + let ptr: ~UnsafeArc = + cast::transmute(blocked_task_ptr & !1); + Shared(*ptr) } - - intrinsics::abort(); } } -/// This is the entry point of unwinding for things like lang items and such. -/// The arguments are normally generated by the compiler, and need to -/// have static lifetimes. -pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! { - use c_str::CString; - use cast::transmute; +impl Death { + pub fn new() -> Death { + Death { on_exit: None, } + } - #[inline] - fn static_char_ptr(p: *c_char) -> &'static str { - let s = unsafe { CString::new(p, false) }; - match s.as_str() { - Some(s) => unsafe { transmute::<&str, &'static str>(s) }, - None => rtabort!("message wasn't utf8?") + /// Collect failure exit codes from children and propagate them to a parent. + pub fn collect_failure(&mut self, result: TaskResult) { + match self.on_exit.take() { + Some(f) => f(result), + None => {} } } - - let msg = static_char_ptr(msg); - let file = static_char_ptr(file); - - begin_unwind(msg, file, line as uint) } -/// This is the entry point of unwinding for fail!() and assert!(). -pub fn begin_unwind(msg: M, file: &'static str, line: uint) -> ! 
{ - use any::AnyRefExt; - use rt::in_green_task_context; - use rt::local::Local; - use rt::task::Task; - use str::Str; - use unstable::intrinsics; - - unsafe { - let task: *mut Task; - // Note that this should be the only allocation performed in this block. - // Currently this means that fail!() on OOM will invoke this code path, - // but then again we're not really ready for failing on OOM anyway. If - // we do start doing this, then we should propagate this allocation to - // be performed in the parent of this task instead of the task that's - // failing. - let msg = ~msg as ~Any; - - { - //let msg: &Any = msg; - let msg_s = match msg.as_ref::<&'static str>() { - Some(s) => *s, - None => match msg.as_ref::<~str>() { - Some(s) => s.as_slice(), - None => "~Any", - } - }; - - if !in_green_task_context() { - rterrln!("failed in non-task context at '{}', {}:{}", - msg_s, file, line); - intrinsics::abort(); - } - - task = Local::unsafe_borrow(); - let n = (*task).name.as_ref().map(|n| n.as_slice()).unwrap_or(""); - - // XXX: this should no get forcibly printed to the console, this should - // either be sent to the parent task (ideally), or get printed to - // the task's logger. Right now the logger is actually a uvio - // instance, which uses unkillable blocks internally for various - // reasons. This will cause serious trouble if the task is failing - // due to mismanagment of its own kill flag, so calling our own - // logger in its current state is a bit of a problem. 
- - rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line); - - if (*task).unwinder.unwinding { - rtabort!("unwinding again"); - } - } - - (*task).unwinder.begin_unwind(msg); +impl Drop for Death { + fn drop(&mut self) { + // make this type noncopyable } } @@ -690,4 +466,13 @@ mod test { #[test] #[should_fail] fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) } + + // Task blocking tests + + #[test] + fn block_and_wake() { + do with_test_task |task| { + BlockedTask::block(task).wake().unwrap() + } + } } diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs index c72ec3161cd..0542c444a84 100644 --- a/src/libstd/rt/thread.rs +++ b/src/libstd/rt/thread.rs @@ -69,6 +69,12 @@ impl Thread<()> { /// called, when the `Thread` falls out of scope its destructor will block /// waiting for the OS thread. pub fn start(main: proc() -> T) -> Thread { + Thread::start_stack(DEFAULT_STACK_SIZE, main) + } + + /// Performs the same functionality as `start`, but specifies an explicit + /// stack size for the new thread. + pub fn start_stack(stack: uint, main: proc() -> T) -> Thread { // We need the address of the packet to fill in to be stable so when // `main` fills it in it's still valid, so allocate an extra ~ box to do diff --git a/src/libstd/rt/tube.rs b/src/libstd/rt/tube.rs deleted file mode 100644 index 5e867bcdfba..00000000000 --- a/src/libstd/rt/tube.rs +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -//! A very simple unsynchronized channel type for sending buffered data from -//! scheduler context to task context. -//! -//! 
XXX: This would be safer to use if split into two types like Port/Chan - -use option::*; -use clone::Clone; -use super::rc::RC; -use rt::sched::Scheduler; -use rt::kill::BlockedTask; -use rt::local::Local; -use vec::OwnedVector; -use container::Container; - -struct TubeState { - blocked_task: Option, - buf: ~[T] -} - -pub struct Tube { - priv p: RC> -} - -impl Tube { - pub fn new() -> Tube { - Tube { - p: RC::new(TubeState { - blocked_task: None, - buf: ~[] - }) - } - } - - pub fn send(&mut self, val: T) { - rtdebug!("tube send"); - unsafe { - let state = self.p.unsafe_borrow_mut(); - (*state).buf.push(val); - - if (*state).blocked_task.is_some() { - // There's a waiting task. Wake it up - rtdebug!("waking blocked tube"); - let task = (*state).blocked_task.take_unwrap(); - let sched: ~Scheduler = Local::take(); - sched.resume_blocked_task_immediately(task); - } - } - } - - pub fn recv(&mut self) -> T { - unsafe { - let state = self.p.unsafe_borrow_mut(); - if !(*state).buf.is_empty() { - return (*state).buf.shift(); - } else { - // Block and wait for the next message - rtdebug!("blocking on tube recv"); - assert!(self.p.refcount() > 1); // There better be somebody to wake us up - assert!((*state).blocked_task.is_none()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|_, task| { - (*state).blocked_task = Some(task); - }); - rtdebug!("waking after tube recv"); - let buf = &mut (*state).buf; - assert!(!buf.is_empty()); - return buf.shift(); - } - } - } -} - -impl Clone for Tube { - fn clone(&self) -> Tube { - Tube { p: self.p.clone() } - } -} - -#[cfg(test)] -mod test { - use rt::test::*; - use rt::rtio::EventLoop; - use rt::sched::Scheduler; - use rt::local::Local; - use super::*; - use prelude::*; - - #[test] - fn simple_test() { - do run_in_newsched_task { - let mut tube: Tube = Tube::new(); - let mut tube_clone = Some(tube.clone()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { 
- let mut tube_clone = tube_clone.take_unwrap(); - tube_clone.send(1); - sched.enqueue_blocked_task(task); - }); - - assert!(tube.recv() == 1); - } - } - - #[test] - fn blocking_test() { - do run_in_newsched_task { - let mut tube: Tube = Tube::new(); - let mut tube_clone = Some(tube.clone()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { - let tube_clone = tube_clone.take_unwrap(); - do sched.event_loop.callback { - let mut tube_clone = tube_clone; - // The task should be blocked on this now and - // sending will wake it up. - tube_clone.send(1); - } - sched.enqueue_blocked_task(task); - }); - - assert!(tube.recv() == 1); - } - } - - #[test] - fn many_blocking_test() { - static MAX: int = 100; - - do run_in_newsched_task { - let mut tube: Tube = Tube::new(); - let mut tube_clone = Some(tube.clone()); - let sched: ~Scheduler = Local::take(); - sched.deschedule_running_task_and_then(|sched, task| { - callback_send(tube_clone.take_unwrap(), 0); - - fn callback_send(tube: Tube, i: int) { - if i == 100 { - return - } - - let mut sched = Local::borrow(None::); - do sched.get().event_loop.callback { - let mut tube = tube; - // The task should be blocked on this now and - // sending will wake it up. - tube.send(i); - callback_send(tube, i + 1); - } - } - - sched.enqueue_blocked_task(task); - }); - - for i in range(0, MAX) { - let j = tube.recv(); - assert!(j == i); - } - } - } -} diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs index 3f6f54a9c0e..8248c6274ca 100644 --- a/src/libstd/rt/unwind.rs +++ b/src/libstd/rt/unwind.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
- // Implementation of Rust stack unwinding // // For background on exception handling and stack unwinding please see "Exception Handling in LLVM" @@ -254,3 +253,74 @@ pub extern "C" fn rust_eh_personality_catch(version: c_int, } } } + +/// This is the entry point of unwinding for things like lang items and such. +/// The arguments are normally generated by the compiler, and need to +/// have static lifetimes. +pub fn begin_unwind_raw(msg: *c_char, file: *c_char, line: size_t) -> ! { + #[inline] + fn static_char_ptr(p: *c_char) -> &'static str { + let s = unsafe { CString::new(p, false) }; + match s.as_str() { + Some(s) => unsafe { cast::transmute::<&str, &'static str>(s) }, + None => rtabort!("message wasn't utf8?") + } + } + + let msg = static_char_ptr(msg); + let file = static_char_ptr(file); + + begin_unwind(msg, file, line as uint) +} + +/// This is the entry point of unwinding for fail!() and assert!(). +pub fn begin_unwind(msg: M, file: &'static str, line: uint) -> ! { + unsafe { + let task: *mut Task; + // Note that this should be the only allocation performed in this block. + // Currently this means that fail!() on OOM will invoke this code path, + // but then again we're not really ready for failing on OOM anyway. If + // we do start doing this, then we should propagate this allocation to + // be performed in the parent of this task instead of the task that's + // failing. + let msg = ~msg as ~Any; + + { + let msg_s = match msg.as_ref::<&'static str>() { + Some(s) => *s, + None => match msg.as_ref::<~str>() { + Some(s) => s.as_slice(), + None => "~Any", + } + }; + + // It is assumed that all reasonable rust code will have a local + // task at all times. This means that this `try_unsafe_borrow` will + // succeed almost all of the time. There are border cases, however, + // when the runtime has *almost* set up the local task, but hasn't + // quite gotten there yet. 
In order to get some better diagnostics, + // we print on failure and immediately abort the whole process if + // there is no local task available. + match Local::try_unsafe_borrow() { + Some(t) => { + task = t; + let n = (*task).name.as_ref() + .map(|n| n.as_slice()).unwrap_or(""); + + println!("task '{}' failed at '{}', {}:{}", n, msg_s, + file, line); + } + None => { + println!("failed at '{}', {}:{}", msg_s, file, line); + intrinsics::abort(); + } + } + + if (*task).unwinder.unwinding { + rtabort!("unwinding again"); + } + } + + (*task).unwinder.begin_unwind(msg); + } +} diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 2f3e5be39e6..69c1da39abc 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -15,7 +15,6 @@ use libc; use option::{Some, None, Option}; use os; use str::StrSlice; -use unstable::atomics::{AtomicInt, INIT_ATOMIC_INT, SeqCst}; use unstable::running_on_valgrind; // Indicates whether we should perform expensive sanity checks, including rtassert! 
@@ -144,13 +143,3 @@ memory and partly incapable of presentation to others.", unsafe { libc::abort() } } } - -static mut EXIT_STATUS: AtomicInt = INIT_ATOMIC_INT; - -pub fn set_exit_status(code: int) { - unsafe { EXIT_STATUS.store(code, SeqCst) } -} - -pub fn get_exit_status() -> int { - unsafe { EXIT_STATUS.load(SeqCst) } -} diff --git a/src/libstd/run.rs b/src/libstd/run.rs index d92291bbfbd..15c0986f899 100644 --- a/src/libstd/run.rs +++ b/src/libstd/run.rs @@ -338,8 +338,8 @@ mod tests { use str; use task::spawn; use unstable::running_on_valgrind; - use io::native::file; - use io::{FileNotFound, Reader, Writer, io_error}; + use io::pipe::PipeStream; + use io::{Writer, Reader, io_error, FileNotFound, OtherIoError}; #[test] #[cfg(not(target_os="android"))] // FIXME(#10380) @@ -426,13 +426,13 @@ mod tests { } fn writeclose(fd: c_int, s: &str) { - let mut writer = file::FileDesc::new(fd, true); + let mut writer = PipeStream::open(fd as int); writer.write(s.as_bytes()); } fn readclose(fd: c_int) -> ~str { let mut res = ~[]; - let mut reader = file::FileDesc::new(fd, true); + let mut reader = PipeStream::open(fd as int); let mut buf = [0, ..1024]; loop { match reader.read(buf) { diff --git a/src/libstd/task.rs b/src/libstd/task.rs new file mode 100644 index 00000000000..4632a3cf6e0 --- /dev/null +++ b/src/libstd/task.rs @@ -0,0 +1,745 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! + * Task management. + * + * An executing Rust program consists of a tree of tasks, each with their own + * stack, and sole ownership of their allocated heap data. 
Tasks communicate
+ * with each other using ports and channels (see std::rt::comm for more info
+ * about how communication works).
+ *
+ * Tasks can be spawned in 3 different modes.
+ *
+ * * Bidirectionally linked: This is the default mode and it's what ```spawn``` does.
+ * Failures will be propagated from parent to child and vice versa.
+ *
+ * * Unidirectionally linked (parent->child): This type of task can be created with
+ * ```spawn_supervised```. In this case, failures are propagated from parent to child
+ * but not the other way around.
+ *
+ * * Unlinked: Tasks can be completely unlinked. These tasks can be created by using
+ * ```spawn_unlinked```. In this case failures are not propagated at all.
+ *
+ * Tasks' failure modes can be further configured. For instance, parent tasks can (un)watch
+ * children failures. Please refer to TaskBuilder's documentation below for more information.
+ *
+ * When a (bi|uni)directionally linked task fails, its failure will be propagated to all tasks
+ * linked to it, this will cause such tasks to fail by a `linked failure`.
+ *
+ * Task Scheduling:
+ *
+ * By default, every task is created in the same scheduler as its parent, where it
+ * is scheduled cooperatively with all other tasks in that scheduler. Some specialized
+ * applications may want more control over their scheduling, in which case they can be
+ * spawned into a new scheduler with the specific properties required. See TaskBuilder's
+ * documentation below for more information. 
+ * + * # Example + * + * ``` + * do spawn { + * log(error, "Hello, World!"); + * } + * ``` + */ + +#[allow(missing_doc)]; + +use any::Any; +use comm::{Chan, Port}; +use kinds::Send; +use option::{None, Some, Option}; +use result::{Result, Ok, Err}; +use rt::local::Local; +use rt::task::Task; +use send_str::{SendStr, IntoSendStr}; +use str::Str; +use util; + +#[cfg(test)] use comm::SharedChan; +#[cfg(test)] use ptr; +#[cfg(test)] use result; + +/// Indicates the manner in which a task exited. +/// +/// A task that completes without failing is considered to exit successfully. +/// Supervised ancestors and linked siblings may yet fail after this task +/// succeeds. Also note that in such a case, it may be nondeterministic whether +/// linked failure or successful exit happen first. +/// +/// If you wish for this result's delivery to block until all linked and/or +/// children tasks complete, recommend using a result future. +pub type TaskResult = Result<(), ~Any>; + +/** + * Task configuration options + * + * # Fields + * + * * watched - Make parent task collect exit status notifications from child + * before reporting its own exit status. (This delays the parent + * task's death and cleanup until after all transitively watched + * children also exit.) True by default. + * + * * notify_chan - Enable lifecycle notifications on the given channel + * + * * name - A name for the task-to-be, for identification in failure messages. + * + * * sched - Specify the configuration of a new scheduler to create the task + * in. This is of particular importance for libraries which want to call + * into foreign code that blocks. Without doing so in a different + * scheduler other tasks will be impeded or even blocked indefinitely. + */ +pub struct TaskOpts { + watched: bool, + notify_chan: Option>, + name: Option, + stack_size: Option +} + +/** + * The task builder type. + * + * Provides detailed control over the properties and behavior of new tasks. 
+ */ +// NB: Builders are designed to be single-use because they do stateful +// things that get weird when reusing - e.g. if you create a result future +// it only applies to a single task, so then you have to maintain Some +// potentially tricky state to ensure that everything behaves correctly +// when you try to reuse the builder to spawn a new task. We'll just +// sidestep that whole issue by making builders uncopyable and making +// the run function move them in. +pub struct TaskBuilder { + opts: TaskOpts, + priv gen_body: Option proc()>, + priv can_not_copy: Option, +} + +/** + * Generate the base configuration for spawning a task, off of which more + * configuration methods can be chained. + * For example, task().unlinked().spawn is equivalent to spawn_unlinked. + */ +pub fn task() -> TaskBuilder { + TaskBuilder { + opts: default_task_opts(), + gen_body: None, + can_not_copy: None, + } +} + +impl TaskBuilder { + fn consume(mut self) -> TaskBuilder { + let gen_body = self.gen_body.take(); + let notify_chan = self.opts.notify_chan.take(); + let name = self.opts.name.take(); + TaskBuilder { + opts: TaskOpts { + watched: self.opts.watched, + notify_chan: notify_chan, + name: name, + stack_size: self.opts.stack_size + }, + gen_body: gen_body, + can_not_copy: None, + } + } + + /// Cause the parent task to collect the child's exit status (and that of + /// all transitively-watched grandchildren) before reporting its own. + pub fn watched(&mut self) { + self.opts.watched = true; + } + + /// Allow the child task to outlive the parent task, at the possible cost + /// of the parent reporting success even if the child task fails later. + pub fn unwatched(&mut self) { + self.opts.watched = false; + } + + /// Get a future representing the exit status of the task. + /// + /// Taking the value of the future will block until the child task + /// terminates. 
The future result return value will be created *before* the task is + /// spawned; as such, do not invoke .get() on it directly; + /// rather, store it in an outer variable/list for later use. + /// + /// Note that the future returned by this function is only useful for + /// obtaining the value of the next task to be spawning with the + /// builder. If additional tasks are spawned with the same builder + /// then a new result future must be obtained prior to spawning each + /// task. + /// + /// # Failure + /// Fails if a future_result was already set for this task. + pub fn future_result(&mut self) -> Port { + // FIXME (#3725): Once linked failure and notification are + // handled in the library, I can imagine implementing this by just + // registering an arbitrary number of task::on_exit handlers and + // sending out messages. + + if self.opts.notify_chan.is_some() { + fail!("Can't set multiple future_results for one task!"); + } + + // Construct the future and give it to the caller. + let (notify_pipe_po, notify_pipe_ch) = Chan::new(); + + // Reconfigure self to use a notify channel. + self.opts.notify_chan = Some(notify_pipe_ch); + + notify_pipe_po + } + + /// Name the task-to-be. Currently the name is used for identification + /// only in failure messages. + pub fn name(&mut self, name: S) { + self.opts.name = Some(name.into_send_str()); + } + + /** + * Add a wrapper to the body of the spawned task. + * + * Before the task is spawned it is passed through a 'body generator' + * function that may perform local setup operations as well as wrap + * the task body in remote setup operations. With this the behavior + * of tasks can be extended in simple ways. + * + * This function augments the current body generator with a new body + * generator by applying the task body which results from the + * existing body generator to the new body generator. 
+ */ + pub fn add_wrapper(&mut self, wrapper: proc(v: proc()) -> proc()) { + let prev_gen_body = self.gen_body.take(); + let prev_gen_body = match prev_gen_body { + Some(gen) => gen, + None => { + let f: proc(proc()) -> proc() = proc(body) body; + f + } + }; + let next_gen_body = { + let f: proc(proc()) -> proc() = proc(body) { + wrapper(prev_gen_body(body)) + }; + f + }; + self.gen_body = Some(next_gen_body); + } + + /** + * Creates and executes a new child task + * + * Sets up a new task with its own call stack and schedules it to run + * the provided unique closure. The task has the properties and behavior + * specified by the task_builder. + * + * # Failure + * + * When spawning into a new scheduler, the number of threads requested + * must be greater than zero. + */ + pub fn spawn(mut self, f: proc()) { + let gen_body = self.gen_body.take(); + let notify_chan = self.opts.notify_chan.take(); + let name = self.opts.name.take(); + let x = self.consume(); + let opts = TaskOpts { + watched: x.opts.watched, + notify_chan: notify_chan, + name: name, + stack_size: x.opts.stack_size + }; + let f = match gen_body { + Some(gen) => { + gen(f) + } + None => { + f + } + }; + + let t: ~Task = Local::take(); + t.spawn_sibling(opts, f); + } + + /** + * Execute a function in another task and return either the return value + * of the function or result::err. + * + * # Return value + * + * If the function executed successfully then try returns result::ok + * containing the value returned by the function. If the function fails + * then try returns result::err containing nil. + * + * # Failure + * Fails if a future_result was already set for this task. + */ + pub fn try(mut self, f: proc() -> T) -> Result { + let (po, ch) = Chan::new(); + + let result = self.future_result(); + + do self.spawn { + ch.send(f()); + } + + match result.recv() { + Ok(()) => Ok(po.recv()), + Err(cause) => Err(cause) + } + } +} + + +/* Task construction */ + +pub fn default_task_opts() -> TaskOpts { + /*! 
+ * The default task options + * + * By default all tasks are supervised by their parent, are spawned + * into the same scheduler, and do not post lifecycle notifications. + */ + + TaskOpts { + watched: true, + notify_chan: None, + name: None, + stack_size: None + } +} + +/* Spawn convenience functions */ + +/// Creates and executes a new child task +/// +/// Sets up a new task with its own call stack and schedules it to run +/// the provided unique closure. +/// +/// This function is equivalent to `task().spawn(f)`. +pub fn spawn(f: proc()) { + let task = task(); + task.spawn(f) +} + +pub fn try(f: proc() -> T) -> Result { + /*! + * Execute a function in another task and return either the return value + * of the function or result::err. + * + * This is equivalent to task().supervised().try. + */ + + let task = task(); + task.try(f) +} + + +/* Lifecycle functions */ + +/// Read the name of the current task. +pub fn with_task_name(blk: |Option<&str>| -> U) -> U { + use rt::task::Task; + + let mut task = Local::borrow(None::); + match task.get().name { + Some(ref name) => blk(Some(name.as_slice())), + None => blk(None) + } +} + +pub fn deschedule() { + //! Yield control to the task scheduler + + use rt::local::Local; + + // FIXME(#7544): Optimize this, since we know we won't block. + let task: ~Task = Local::take(); + task.yield_now(); +} + +pub fn failing() -> bool { + //! True if the running task has failed + + use rt::task::Task; + + let mut local = Local::borrow(None::); + local.get().unwinder.unwinding() +} + +// The following 8 tests test the following 2^3 combinations: +// {un,}linked {un,}supervised failure propagation {up,down}wards. + +// !!! These tests are dangerous. If Something is buggy, they will hang, !!! +// !!! instead of exiting cleanly. This might wedge the buildbots. !!! 
+ +#[test] +fn test_unnamed_task() { + use rt::test::run_in_uv_task; + + do run_in_uv_task { + do spawn { + with_task_name(|name| { + assert!(name.is_none()); + }) + } + } +} + +#[test] +fn test_owned_named_task() { + use rt::test::run_in_uv_task; + + do run_in_uv_task { + let mut t = task(); + t.name(~"ada lovelace"); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) + } + } +} + +#[test] +fn test_static_named_task() { + use rt::test::run_in_uv_task; + + do run_in_uv_task { + let mut t = task(); + t.name("ada lovelace"); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) + } + } +} + +#[test] +fn test_send_named_task() { + use rt::test::run_in_uv_task; + + do run_in_uv_task { + let mut t = task(); + t.name("ada lovelace".into_send_str()); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) + } + } +} + +#[test] +fn test_run_basic() { + let (po, ch) = Chan::new(); + do task().spawn { + ch.send(()); + } + po.recv(); +} + +#[test] +fn test_add_wrapper() { + let (po, ch) = Chan::new(); + let mut b0 = task(); + do b0.add_wrapper |body| { + let ch = ch; + let result: proc() = proc() { + body(); + ch.send(()); + }; + result + }; + do b0.spawn { } + po.recv(); +} + +#[test] +fn test_future_result() { + let mut builder = task(); + let result = builder.future_result(); + do builder.spawn {} + assert!(result.recv().is_ok()); + + let mut builder = task(); + let result = builder.future_result(); + do builder.spawn { + fail!(); + } + assert!(result.recv().is_err()); +} + +#[test] #[should_fail] +fn test_back_to_the_future_result() { + let mut builder = task(); + builder.future_result(); + builder.future_result(); +} + +#[test] +fn test_try_success() { + match do try { + ~"Success!" 
+ } { + result::Ok(~"Success!") => (), + _ => fail!() + } +} + +#[test] +fn test_try_fail() { + match do try { + fail!() + } { + result::Err(_) => (), + result::Ok(()) => fail!() + } +} + +#[cfg(test)] +fn get_sched_id() -> int { + use rt::sched::Scheduler; + let mut sched = Local::borrow(None::); + sched.get().sched_id() as int +} + +#[test] +fn test_spawn_sched() { + let (po, ch) = SharedChan::new(); + + fn f(i: int, ch: SharedChan<()>) { + let parent_sched_id = get_sched_id(); + + do spawn_sched(SingleThreaded) { + let child_sched_id = get_sched_id(); + assert!(parent_sched_id != child_sched_id); + + if (i == 0) { + ch.send(()); + } else { + f(i - 1, ch.clone()); + } + }; + + } + f(10, ch); + po.recv(); +} + +#[test] +fn test_spawn_sched_childs_on_default_sched() { + let (po, ch) = Chan::new(); + + // Assuming tests run on the default scheduler + let default_id = get_sched_id(); + + do spawn_sched(SingleThreaded) { + let ch = ch; + let parent_sched_id = get_sched_id(); + do spawn { + let child_sched_id = get_sched_id(); + assert!(parent_sched_id != child_sched_id); + assert_eq!(child_sched_id, default_id); + ch.send(()); + }; + }; + + po.recv(); +} + +#[test] +fn test_spawn_sched_blocking() { + use unstable::mutex::Mutex; + + unsafe { + + // Testing that a task in one scheduler can block in foreign code + // without affecting other schedulers + 20u.times(|| { + let (start_po, start_ch) = Chan::new(); + let (fin_po, fin_ch) = Chan::new(); + + let mut lock = Mutex::new(); + let lock2 = lock.clone(); + + do spawn_sched(SingleThreaded) { + let mut lock = lock2; + lock.lock(); + + start_ch.send(()); + + // Block the scheduler thread + lock.wait(); + lock.unlock(); + + fin_ch.send(()); + }; + + // Wait until the other task has its lock + start_po.recv(); + + fn pingpong(po: &Port, ch: &Chan) { + let mut val = 20; + while val > 0 { + val = po.recv(); + ch.try_send(val - 1); + } + } + + let (setup_po, setup_ch) = Chan::new(); + let (parent_po, parent_ch) = Chan::new(); 
+ do spawn { + let (child_po, child_ch) = Chan::new(); + setup_ch.send(child_ch); + pingpong(&child_po, &parent_ch); + }; + + let child_ch = setup_po.recv(); + child_ch.send(20); + pingpong(&parent_po, &child_ch); + lock.lock(); + lock.signal(); + lock.unlock(); + fin_po.recv(); + lock.destroy(); + }) + } +} + +#[cfg(test)] +fn avoid_copying_the_body(spawnfn: |v: proc()|) { + let (p, ch) = Chan::::new(); + + let x = ~1; + let x_in_parent = ptr::to_unsafe_ptr(&*x) as uint; + + do spawnfn { + let x_in_child = ptr::to_unsafe_ptr(&*x) as uint; + ch.send(x_in_child); + } + + let x_in_child = p.recv(); + assert_eq!(x_in_parent, x_in_child); +} + +#[test] +fn test_avoid_copying_the_body_spawn() { + avoid_copying_the_body(spawn); +} + +#[test] +fn test_avoid_copying_the_body_task_spawn() { + avoid_copying_the_body(|f| { + let builder = task(); + do builder.spawn || { + f(); + } + }) +} + +#[test] +fn test_avoid_copying_the_body_try() { + avoid_copying_the_body(|f| { + do try || { + f() + }; + }) +} + +#[test] +fn test_child_doesnt_ref_parent() { + // If the child refcounts the parent task, this will stack overflow when + // climbing the task tree to dereference each ancestor. (See #1789) + // (well, it would if the constant were 8000+ - I lowered it to be more + // valgrind-friendly. try this at home, instead..!) 
+ static generations: uint = 16; + fn child_no(x: uint) -> proc() { + return proc() { + if x < generations { + let mut t = task(); + t.unwatched(); + t.spawn(child_no(x+1)); + } + } + } + let mut t = task(); + t.unwatched(); + t.spawn(child_no(0)); +} + +#[test] +fn test_simple_newsched_spawn() { + use rt::test::run_in_uv_task; + + do run_in_uv_task { + spawn(proc()()) + } +} + +#[test] +fn test_try_fail_message_static_str() { + match do try { + fail!("static string"); + } { + Err(e) => { + type T = &'static str; + assert!(e.is::()); + assert_eq!(*e.move::().unwrap(), "static string"); + } + Ok(()) => fail!() + } +} + +#[test] +fn test_try_fail_message_owned_str() { + match do try { + fail!(~"owned string"); + } { + Err(e) => { + type T = ~str; + assert!(e.is::()); + assert_eq!(*e.move::().unwrap(), ~"owned string"); + } + Ok(()) => fail!() + } +} + +#[test] +fn test_try_fail_message_any() { + match do try { + fail!(~413u16 as ~Any); + } { + Err(e) => { + type T = ~Any; + assert!(e.is::()); + let any = e.move::().unwrap(); + assert!(any.is::()); + assert_eq!(*any.move::().unwrap(), 413u16); + } + Ok(()) => fail!() + } +} + +#[test] +fn test_try_fail_message_unit_struct() { + struct Juju; + + match do try { + fail!(Juju) + } { + Err(ref e) if e.is::() => {} + Err(_) | Ok(()) => fail!() + } +} diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs deleted file mode 100644 index 3310dddc327..00000000000 --- a/src/libstd/task/mod.rs +++ /dev/null @@ -1,799 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -/*! - * Task management. 
- * - * An executing Rust program consists of a tree of tasks, each with their own - * stack, and sole ownership of their allocated heap data. Tasks communicate - * with each other using ports and channels (see std::rt::comm for more info - * about how communication works). - * - * Tasks can be spawned in 3 different modes. - * - * * Bidirectionally linked: This is the default mode and it's what ```spawn``` does. - * Failures will be propagated from parent to child and vice versa. - * - * * Unidirectionally linked (parent->child): This type of task can be created with - * ```spawn_supervised```. In this case, failures are propagated from parent to child - * but not the other way around. - * - * * Unlinked: Tasks can be completely unlinked. These tasks can be created by using - * ```spawn_unlinked```. In this case failures are not propagated at all. - * - * Tasks' failure modes can be further configured. For instance, parent tasks can (un)watch - * children failures. Please, refer to TaskBuilder's documentation bellow for more information. - * - * When a (bi|uni)directionally linked task fails, its failure will be propagated to all tasks - * linked to it, this will cause such tasks to fail by a `linked failure`. - * - * Task Scheduling: - * - * By default, every task is created in the same scheduler as its parent, where it - * is scheduled cooperatively with all other tasks in that scheduler. Some specialized - * applications may want more control over their scheduling, in which case they can be - * spawned into a new scheduler with the specific properties required. See TaskBuilder's - * documentation bellow for more information. 
- * - * # Example - * - * ``` - * do spawn { - * log(error, "Hello, World!"); - * } - * ``` - */ - -#[allow(missing_doc)]; - -use prelude::*; - -use comm::{Chan, Port}; -use result::{Result, Ok, Err}; -use rt::in_green_task_context; -use rt::local::Local; -use send_str::{SendStr, IntoSendStr}; -use util; - -#[cfg(test)] use any::Any; -#[cfg(test)] use comm::SharedChan; -#[cfg(test)] use ptr; -#[cfg(test)] use result; - -pub mod spawn; - -/// Indicates the manner in which a task exited. -/// -/// A task that completes without failing is considered to exit successfully. -/// Supervised ancestors and linked siblings may yet fail after this task -/// succeeds. Also note that in such a case, it may be nondeterministic whether -/// linked failure or successful exit happen first. -/// -/// If you wish for this result's delivery to block until all linked and/or -/// children tasks complete, recommend using a result future. -pub type TaskResult = Result<(), ~Any>; - -/// Scheduler modes -#[deriving(Eq)] -pub enum SchedMode { - /// Run task on the default scheduler - DefaultScheduler, - /// All tasks run in the same OS thread - SingleThreaded, -} - -/** - * Scheduler configuration options - * - * # Fields - * - * * sched_mode - The operating mode of the scheduler - * - */ -pub struct SchedOpts { - priv mode: SchedMode, -} - -/** - * Task configuration options - * - * # Fields - * - * * watched - Make parent task collect exit status notifications from child - * before reporting its own exit status. (This delays the parent - * task's death and cleanup until after all transitively watched - * children also exit.) True by default. - * - * * notify_chan - Enable lifecycle notifications on the given channel - * - * * name - A name for the task-to-be, for identification in failure messages. - * - * * sched - Specify the configuration of a new scheduler to create the task - * in. This is of particular importance for libraries which want to call - * into foreign code that blocks. 
Without doing so in a different - * scheduler other tasks will be impeded or even blocked indefinitely. - */ -pub struct TaskOpts { - priv watched: bool, - priv notify_chan: Option>, - name: Option, - sched: SchedOpts, - stack_size: Option -} - -/** - * The task builder type. - * - * Provides detailed control over the properties and behavior of new tasks. - */ -// NB: Builders are designed to be single-use because they do stateful -// things that get weird when reusing - e.g. if you create a result future -// it only applies to a single task, so then you have to maintain Some -// potentially tricky state to ensure that everything behaves correctly -// when you try to reuse the builder to spawn a new task. We'll just -// sidestep that whole issue by making builders uncopyable and making -// the run function move them in. -pub struct TaskBuilder { - opts: TaskOpts, - priv gen_body: Option proc()>, - priv can_not_copy: Option, -} - -/** - * Generate the base configuration for spawning a task, off of which more - * configuration methods can be chained. - * For example, task().unlinked().spawn is equivalent to spawn_unlinked. - */ -pub fn task() -> TaskBuilder { - TaskBuilder { - opts: default_task_opts(), - gen_body: None, - can_not_copy: None, - } -} - -impl TaskBuilder { - fn consume(mut self) -> TaskBuilder { - let gen_body = self.gen_body.take(); - let notify_chan = self.opts.notify_chan.take(); - let name = self.opts.name.take(); - TaskBuilder { - opts: TaskOpts { - watched: self.opts.watched, - notify_chan: notify_chan, - name: name, - sched: self.opts.sched, - stack_size: self.opts.stack_size - }, - gen_body: gen_body, - can_not_copy: None, - } - } - - /// Cause the parent task to collect the child's exit status (and that of - /// all transitively-watched grandchildren) before reporting its own. 
- pub fn watched(&mut self) { - self.opts.watched = true; - } - - /// Allow the child task to outlive the parent task, at the possible cost - /// of the parent reporting success even if the child task fails later. - pub fn unwatched(&mut self) { - self.opts.watched = false; - } - - /// Get a future representing the exit status of the task. - /// - /// Taking the value of the future will block until the child task - /// terminates. The future result return value will be created *before* the task is - /// spawned; as such, do not invoke .get() on it directly; - /// rather, store it in an outer variable/list for later use. - /// - /// Note that the future returned by this function is only useful for - /// obtaining the value of the next task to be spawning with the - /// builder. If additional tasks are spawned with the same builder - /// then a new result future must be obtained prior to spawning each - /// task. - /// - /// # Failure - /// Fails if a future_result was already set for this task. - pub fn future_result(&mut self) -> Port { - // FIXME (#3725): Once linked failure and notification are - // handled in the library, I can imagine implementing this by just - // registering an arbitrary number of task::on_exit handlers and - // sending out messages. - - if self.opts.notify_chan.is_some() { - fail!("Can't set multiple future_results for one task!"); - } - - // Construct the future and give it to the caller. - let (notify_pipe_po, notify_pipe_ch) = Chan::new(); - - // Reconfigure self to use a notify channel. - self.opts.notify_chan = Some(notify_pipe_ch); - - notify_pipe_po - } - - /// Name the task-to-be. Currently the name is used for identification - /// only in failure messages. - pub fn name(&mut self, name: S) { - self.opts.name = Some(name.into_send_str()); - } - - /// Configure a custom scheduler mode for the task. 
- pub fn sched_mode(&mut self, mode: SchedMode) { - self.opts.sched.mode = mode; - } - - /** - * Add a wrapper to the body of the spawned task. - * - * Before the task is spawned it is passed through a 'body generator' - * function that may perform local setup operations as well as wrap - * the task body in remote setup operations. With this the behavior - * of tasks can be extended in simple ways. - * - * This function augments the current body generator with a new body - * generator by applying the task body which results from the - * existing body generator to the new body generator. - */ - pub fn add_wrapper(&mut self, wrapper: proc(v: proc()) -> proc()) { - let prev_gen_body = self.gen_body.take(); - let prev_gen_body = match prev_gen_body { - Some(gen) => gen, - None => { - let f: proc(proc()) -> proc() = proc(body) body; - f - } - }; - let next_gen_body = { - let f: proc(proc()) -> proc() = proc(body) { - wrapper(prev_gen_body(body)) - }; - f - }; - self.gen_body = Some(next_gen_body); - } - - /** - * Creates and executes a new child task - * - * Sets up a new task with its own call stack and schedules it to run - * the provided unique closure. The task has the properties and behavior - * specified by the task_builder. - * - * # Failure - * - * When spawning into a new scheduler, the number of threads requested - * must be greater than zero. - */ - pub fn spawn(mut self, f: proc()) { - let gen_body = self.gen_body.take(); - let notify_chan = self.opts.notify_chan.take(); - let name = self.opts.name.take(); - let x = self.consume(); - let opts = TaskOpts { - watched: x.opts.watched, - notify_chan: notify_chan, - name: name, - sched: x.opts.sched, - stack_size: x.opts.stack_size - }; - let f = match gen_body { - Some(gen) => { - gen(f) - } - None => { - f - } - }; - spawn::spawn_raw(opts, f); - } - - /** - * Execute a function in another task and return either the return value - * of the function or result::err. 
- * - * # Return value - * - * If the function executed successfully then try returns result::ok - * containing the value returned by the function. If the function fails - * then try returns result::err containing nil. - * - * # Failure - * Fails if a future_result was already set for this task. - */ - pub fn try(mut self, f: proc() -> T) -> Result { - let (po, ch) = Chan::new(); - - let result = self.future_result(); - - do self.spawn { - ch.send(f()); - } - - match result.recv() { - Ok(()) => Ok(po.recv()), - Err(cause) => Err(cause) - } - } -} - - -/* Task construction */ - -pub fn default_task_opts() -> TaskOpts { - /*! - * The default task options - * - * By default all tasks are supervised by their parent, are spawned - * into the same scheduler, and do not post lifecycle notifications. - */ - - TaskOpts { - watched: true, - notify_chan: None, - name: None, - sched: SchedOpts { - mode: DefaultScheduler, - }, - stack_size: None - } -} - -/* Spawn convenience functions */ - -/// Creates and executes a new child task -/// -/// Sets up a new task with its own call stack and schedules it to run -/// the provided unique closure. -/// -/// This function is equivalent to `task().spawn(f)`. -pub fn spawn(f: proc()) { - let task = task(); - task.spawn(f) -} - -pub fn spawn_sched(mode: SchedMode, f: proc()) { - /*! - * Creates a new task on a new or existing scheduler. - * - * When there are no more tasks to execute the - * scheduler terminates. - * - * # Failure - * - * In manual threads mode the number of threads requested must be - * greater than zero. - */ - - let mut task = task(); - task.sched_mode(mode); - task.spawn(f) -} - -pub fn try(f: proc() -> T) -> Result { - /*! - * Execute a function in another task and return either the return value - * of the function or result::err. - * - * This is equivalent to task().supervised().try. - */ - - let task = task(); - task.try(f) -} - - -/* Lifecycle functions */ - -/// Read the name of the current task. 
-pub fn with_task_name(blk: |Option<&str>| -> U) -> U { - use rt::task::Task; - - if in_green_task_context() { - let mut task = Local::borrow(None::); - match task.get().name { - Some(ref name) => blk(Some(name.as_slice())), - None => blk(None) - } - } else { - fail!("no task name exists in non-green task context") - } -} - -pub fn deschedule() { - //! Yield control to the task scheduler - - use rt::local::Local; - use rt::sched::Scheduler; - - // FIXME(#7544): Optimize this, since we know we won't block. - let sched: ~Scheduler = Local::take(); - sched.yield_now(); -} - -pub fn failing() -> bool { - //! True if the running task has failed - - use rt::task::Task; - - let mut local = Local::borrow(None::); - local.get().unwinder.unwinding -} - -// The following 8 tests test the following 2^3 combinations: -// {un,}linked {un,}supervised failure propagation {up,down}wards. - -// !!! These tests are dangerous. If Something is buggy, they will hang, !!! -// !!! instead of exiting cleanly. This might wedge the buildbots. !!! 
- -#[test] -fn test_unnamed_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - do spawn { - with_task_name(|name| { - assert!(name.is_none()); - }) - } - } -} - -#[test] -fn test_owned_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name(~"ada lovelace"); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } - } -} - -#[test] -fn test_static_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name("ada lovelace"); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } - } -} - -#[test] -fn test_send_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name("ada lovelace".into_send_str()); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } - } -} - -#[test] -fn test_run_basic() { - let (po, ch) = Chan::new(); - do task().spawn { - ch.send(()); - } - po.recv(); -} - -#[test] -fn test_add_wrapper() { - let (po, ch) = Chan::new(); - let mut b0 = task(); - do b0.add_wrapper |body| { - let ch = ch; - let result: proc() = proc() { - body(); - ch.send(()); - }; - result - }; - do b0.spawn { } - po.recv(); -} - -#[test] -fn test_future_result() { - let mut builder = task(); - let result = builder.future_result(); - do builder.spawn {} - assert!(result.recv().is_ok()); - - let mut builder = task(); - let result = builder.future_result(); - do builder.spawn { - fail!(); - } - assert!(result.recv().is_err()); -} - -#[test] #[should_fail] -fn test_back_to_the_future_result() { - let mut builder = task(); - builder.future_result(); - builder.future_result(); -} - -#[test] -fn test_try_success() { - match do try { - ~"Success!" 
- } { - result::Ok(~"Success!") => (), - _ => fail!() - } -} - -#[test] -fn test_try_fail() { - match do try { - fail!() - } { - result::Err(_) => (), - result::Ok(()) => fail!() - } -} - -#[cfg(test)] -fn get_sched_id() -> int { - use rt::sched::Scheduler; - let mut sched = Local::borrow(None::); - sched.get().sched_id() as int -} - -#[test] -fn test_spawn_sched() { - let (po, ch) = SharedChan::new(); - - fn f(i: int, ch: SharedChan<()>) { - let parent_sched_id = get_sched_id(); - - do spawn_sched(SingleThreaded) { - let child_sched_id = get_sched_id(); - assert!(parent_sched_id != child_sched_id); - - if (i == 0) { - ch.send(()); - } else { - f(i - 1, ch.clone()); - } - }; - - } - f(10, ch); - po.recv(); -} - -#[test] -fn test_spawn_sched_childs_on_default_sched() { - let (po, ch) = Chan::new(); - - // Assuming tests run on the default scheduler - let default_id = get_sched_id(); - - do spawn_sched(SingleThreaded) { - let ch = ch; - let parent_sched_id = get_sched_id(); - do spawn { - let child_sched_id = get_sched_id(); - assert!(parent_sched_id != child_sched_id); - assert_eq!(child_sched_id, default_id); - ch.send(()); - }; - }; - - po.recv(); -} - -#[test] -fn test_spawn_sched_blocking() { - use unstable::mutex::Mutex; - - unsafe { - - // Testing that a task in one scheduler can block in foreign code - // without affecting other schedulers - 20u.times(|| { - let (start_po, start_ch) = Chan::new(); - let (fin_po, fin_ch) = Chan::new(); - - let mut lock = Mutex::new(); - let lock2 = lock.clone(); - - do spawn_sched(SingleThreaded) { - let mut lock = lock2; - lock.lock(); - - start_ch.send(()); - - // Block the scheduler thread - lock.wait(); - lock.unlock(); - - fin_ch.send(()); - }; - - // Wait until the other task has its lock - start_po.recv(); - - fn pingpong(po: &Port, ch: &Chan) { - let mut val = 20; - while val > 0 { - val = po.recv(); - ch.try_send(val - 1); - } - } - - let (setup_po, setup_ch) = Chan::new(); - let (parent_po, parent_ch) = Chan::new(); 
- do spawn { - let (child_po, child_ch) = Chan::new(); - setup_ch.send(child_ch); - pingpong(&child_po, &parent_ch); - }; - - let child_ch = setup_po.recv(); - child_ch.send(20); - pingpong(&parent_po, &child_ch); - lock.lock(); - lock.signal(); - lock.unlock(); - fin_po.recv(); - lock.destroy(); - }) - } -} - -#[cfg(test)] -fn avoid_copying_the_body(spawnfn: |v: proc()|) { - let (p, ch) = Chan::::new(); - - let x = ~1; - let x_in_parent = ptr::to_unsafe_ptr(&*x) as uint; - - do spawnfn { - let x_in_child = ptr::to_unsafe_ptr(&*x) as uint; - ch.send(x_in_child); - } - - let x_in_child = p.recv(); - assert_eq!(x_in_parent, x_in_child); -} - -#[test] -fn test_avoid_copying_the_body_spawn() { - avoid_copying_the_body(spawn); -} - -#[test] -fn test_avoid_copying_the_body_task_spawn() { - avoid_copying_the_body(|f| { - let builder = task(); - do builder.spawn || { - f(); - } - }) -} - -#[test] -fn test_avoid_copying_the_body_try() { - avoid_copying_the_body(|f| { - do try || { - f() - }; - }) -} - -#[test] -fn test_child_doesnt_ref_parent() { - // If the child refcounts the parent task, this will stack overflow when - // climbing the task tree to dereference each ancestor. (See #1789) - // (well, it would if the constant were 8000+ - I lowered it to be more - // valgrind-friendly. try this at home, instead..!) 
- static generations: uint = 16; - fn child_no(x: uint) -> proc() { - return proc() { - if x < generations { - let mut t = task(); - t.unwatched(); - t.spawn(child_no(x+1)); - } - } - } - let mut t = task(); - t.unwatched(); - t.spawn(child_no(0)); -} - -#[test] -fn test_simple_newsched_spawn() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - spawn(proc()()) - } -} - -#[test] -fn test_try_fail_message_static_str() { - match do try { - fail!("static string"); - } { - Err(e) => { - type T = &'static str; - assert!(e.is::()); - assert_eq!(*e.move::().unwrap(), "static string"); - } - Ok(()) => fail!() - } -} - -#[test] -fn test_try_fail_message_owned_str() { - match do try { - fail!(~"owned string"); - } { - Err(e) => { - type T = ~str; - assert!(e.is::()); - assert_eq!(*e.move::().unwrap(), ~"owned string"); - } - Ok(()) => fail!() - } -} - -#[test] -fn test_try_fail_message_any() { - match do try { - fail!(~413u16 as ~Any); - } { - Err(e) => { - type T = ~Any; - assert!(e.is::()); - let any = e.move::().unwrap(); - assert!(any.is::()); - assert_eq!(*any.move::().unwrap(), 413u16); - } - Ok(()) => fail!() - } -} - -#[test] -fn test_try_fail_message_unit_struct() { - struct Juju; - - match do try { - fail!(Juju) - } { - Err(ref e) if e.is::() => {} - Err(_) | Ok(()) => fail!() - } -} diff --git a/src/libstd/task/spawn.rs b/src/libstd/task/spawn.rs deleted file mode 100644 index 1148774020a..00000000000 --- a/src/libstd/task/spawn.rs +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -/*!************************************************************************** - * - * WARNING: linked failure has been removed since this doc comment was written, - * but it was so pretty that I didn't want to remove it. - * - * Spawning & linked failure - * - * Several data structures are involved in task management to allow properly - * propagating failure across linked/supervised tasks. - * - * (1) The "taskgroup_arc" is an unsafe::exclusive which contains a hashset of - * all tasks that are part of the group. Some tasks are 'members', which - * means if they fail, they will kill everybody else in the taskgroup. - * Other tasks are 'descendants', which means they will not kill tasks - * from this group, but can be killed by failing members. - * - * A new one of these is created each spawn_linked or spawn_supervised. - * - * (2) The "taskgroup" is a per-task control structure that tracks a task's - * spawn configuration. It contains a reference to its taskgroup_arc, a - * reference to its node in the ancestor list (below), and an optionally - * configured notification port. These are stored in TLS. - * - * (3) The "ancestor_list" is a cons-style list of unsafe::exclusives which - * tracks 'generations' of taskgroups -- a group's ancestors are groups - * which (directly or transitively) spawn_supervised-ed them. Each task - * is recorded in the 'descendants' of each of its ancestor groups. - * - * Spawning a supervised task is O(n) in the number of generations still - * alive, and exiting (by success or failure) that task is also O(n). 
- * - * This diagram depicts the references between these data structures: - * - * linked_________________________________ - * ___/ _________ \___ - * / \ | group X | / \ - * ( A ) - - - - - - - > | {A,B} {}|< - - -( B ) - * \___/ |_________| \___/ - * unlinked - * | __ (nil) - * | //| The following code causes this: - * |__ // /\ _________ - * / \ // || | group Y | fn taskA() { - * ( C )- - - ||- - - > |{C} {D,E}| spawn(taskB); - * \___/ / \=====> |_________| spawn_unlinked(taskC); - * supervise /gen \ ... - * | __ \ 00 / } - * | //| \__/ fn taskB() { ... } - * |__ // /\ _________ fn taskC() { - * / \/ || | group Z | spawn_supervised(taskD); - * ( D )- - - ||- - - > | {D} {E} | ... - * \___/ / \=====> |_________| } - * supervise /gen \ fn taskD() { - * | __ \ 01 / spawn_supervised(taskE); - * | //| \__/ ... - * |__ // _________ } - * / \/ | group W | fn taskE() { ... } - * ( E )- - - - - - - > | {E} {} | - * \___/ |_________| - * - * "tcb" "taskgroup_arc" - * "ancestor_list" - * - ****************************************************************************/ - -#[doc(hidden)]; - -use prelude::*; - -use comm::Chan; -use rt::local::Local; -use rt::sched::{Scheduler, Shutdown, TaskFromFriend}; -use rt::task::{Task, Sched}; -use rt::thread::Thread; -use rt::{in_green_task_context, new_event_loop}; -use task::{SingleThreaded, TaskOpts, TaskResult}; - -#[cfg(test)] use task::default_task_opts; -#[cfg(test)] use task; - -pub fn spawn_raw(mut opts: TaskOpts, f: proc()) { - assert!(in_green_task_context()); - - let mut task = if opts.sched.mode != SingleThreaded { - if opts.watched { - Task::build_child(opts.stack_size, f) - } else { - Task::build_root(opts.stack_size, f) - } - } else { - unsafe { - // Creating a 1:1 task:thread ... - let sched: *mut Scheduler = Local::unsafe_borrow(); - let sched_handle = (*sched).make_handle(); - - // Since this is a 1:1 scheduler we create a queue not in - // the stealee set. 
The run_anything flag is set false - // which will disable stealing. - let (worker, _stealer) = (*sched).work_queue.pool().deque(); - - // Create a new scheduler to hold the new task - let mut new_sched = ~Scheduler::new_special(new_event_loop(), - worker, - (*sched).work_queues.clone(), - (*sched).sleeper_list.clone(), - false, - Some(sched_handle)); - let mut new_sched_handle = new_sched.make_handle(); - - // Allow the scheduler to exit when the pinned task exits - new_sched_handle.send(Shutdown); - - // Pin the new task to the new scheduler - let new_task = if opts.watched { - Task::build_homed_child(opts.stack_size, f, Sched(new_sched_handle)) - } else { - Task::build_homed_root(opts.stack_size, f, Sched(new_sched_handle)) - }; - - // Create a task that will later be used to join with the new scheduler - // thread when it is ready to terminate - let (thread_port, thread_chan) = Chan::new(); - let join_task = do Task::build_child(None) { - debug!("running join task"); - let thread: Thread<()> = thread_port.recv(); - thread.join(); - }; - - // Put the scheduler into another thread - let orig_sched_handle = (*sched).make_handle(); - - let new_sched = new_sched; - let thread = do Thread::start { - let mut new_sched = new_sched; - let mut orig_sched_handle = orig_sched_handle; - - let bootstrap_task = ~do Task::new_root(&mut new_sched.stack_pool, None) || { - debug!("boostrapping a 1:1 scheduler"); - }; - new_sched.bootstrap(bootstrap_task); - - // Now tell the original scheduler to join with this thread - // by scheduling a thread-joining task on the original scheduler - orig_sched_handle.send(TaskFromFriend(join_task)); - - // NB: We can't simply send a message from here to another task - // because this code isn't running in a task and message passing doesn't - // work outside of tasks. Hence we're sending a scheduler message - // to execute a new task directly to a scheduler. 
- }; - - // Give the thread handle to the join task - thread_chan.send(thread); - - // When this task is enqueued on the current scheduler it will then get - // forwarded to the scheduler to which it is pinned - new_task - } - }; - - if opts.notify_chan.is_some() { - let notify_chan = opts.notify_chan.take_unwrap(); - let on_exit: proc(TaskResult) = proc(task_result) { - notify_chan.try_send(task_result); - }; - task.death.on_exit = Some(on_exit); - } - - task.name = opts.name.take(); - debug!("spawn calling run_task"); - Scheduler::run_task(task); - -} - -#[test] -fn test_spawn_raw_simple() { - let (po, ch) = Chan::new(); - do spawn_raw(default_task_opts()) { - ch.send(()); - } - po.recv(); -} - -#[test] -fn test_spawn_raw_unsupervise() { - let opts = task::TaskOpts { - watched: false, - notify_chan: None, - .. default_task_opts() - }; - do spawn_raw(opts) { - fail!(); - } -} - -#[test] -fn test_spawn_raw_notify_success() { - let (notify_po, notify_ch) = Chan::new(); - - let opts = task::TaskOpts { - notify_chan: Some(notify_ch), - .. default_task_opts() - }; - do spawn_raw(opts) { - } - assert!(notify_po.recv().is_ok()); -} - -#[test] -fn test_spawn_raw_notify_failure() { - // New bindings for these - let (notify_po, notify_ch) = Chan::new(); - - let opts = task::TaskOpts { - watched: false, - notify_chan: Some(notify_ch), - .. default_task_opts() - }; - do spawn_raw(opts) { - fail!(); - } - assert!(notify_po.recv().is_err()); -} diff --git a/src/libstd/unstable/lang.rs b/src/libstd/unstable/lang.rs index 06f9ba65ae7..e7e8cec9d5f 100644 --- a/src/libstd/unstable/lang.rs +++ b/src/libstd/unstable/lang.rs @@ -11,15 +11,13 @@ //! Runtime calls emitted by the compiler. use c_str::ToCStr; -use cast::transmute; use libc::{c_char, size_t, uintptr_t}; -use rt::task; use rt::borrowck; #[cold] #[lang="fail_"] pub fn fail_(expr: *c_char, file: *c_char, line: size_t) -> ! 
{ - task::begin_unwind_raw(expr, file, line); + ::rt::begin_unwind_raw(expr, file, line); } #[cold] @@ -81,15 +79,3 @@ pub unsafe fn check_not_borrowed(a: *u8, line: size_t) { borrowck::check_not_borrowed(a, file, line) } - -#[lang="start"] -pub fn start(main: *u8, argc: int, argv: **c_char) -> int { - use rt; - - unsafe { - return do rt::start(argc, argv as **u8) { - let main: extern "Rust" fn() = transmute(main); - main(); - }; - } -} diff --git a/src/libstd/unstable/mod.rs b/src/libstd/unstable/mod.rs index f70d0b5169f..f4573785996 100644 --- a/src/libstd/unstable/mod.rs +++ b/src/libstd/unstable/mod.rs @@ -23,6 +23,7 @@ pub mod lang; pub mod sync; pub mod mutex; pub mod raw; +pub mod stack; /** diff --git a/src/libsyntax/ext/expand.rs b/src/libsyntax/ext/expand.rs index a6e45c7e1bb..2c2669e914c 100644 --- a/src/libsyntax/ext/expand.rs +++ b/src/libsyntax/ext/expand.rs @@ -740,10 +740,10 @@ pub fn std_macros() -> @str { fail!("explicit failure") ); ($msg:expr) => ( - ::std::rt::task::begin_unwind($msg, file!(), line!()) + ::std::rt::begin_unwind($msg, file!(), line!()) ); ($fmt:expr, $($arg:tt)*) => ( - ::std::rt::task::begin_unwind(format!($fmt, $($arg)*), file!(), line!()) + ::std::rt::begin_unwind(format!($fmt, $($arg)*), file!(), line!()) ) ) -- cgit 1.4.1-3-g733a5 From 7554f5c58f840b648bc3c5b2d24d0df6683eed03 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 18:05:37 -0800 Subject: std: Fix a bug where Local::take() didn't zero out In the compiled version of local_ptr (that with #[thread_local]), the take() funciton didn't zero-out the previous pointer, allowing for multiple takes (with fewer runtime assertions being tripped). 
--- src/libstd/rt/local_ptr.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'src/libstd/rt') diff --git a/src/libstd/rt/local_ptr.rs b/src/libstd/rt/local_ptr.rs index 925aa802ad5..b75f2927003 100644 --- a/src/libstd/rt/local_ptr.rs +++ b/src/libstd/rt/local_ptr.rs @@ -109,7 +109,9 @@ pub mod compiled { /// Does not validate the pointer type. #[inline] pub unsafe fn take() -> ~T { - let ptr: ~T = cast::transmute(RT_TLS_PTR); + let ptr = RT_TLS_PTR; + assert!(!ptr.is_null()); + let ptr: ~T = cast::transmute(ptr); // can't use `as`, due to type not matching with `cfg(test)` RT_TLS_PTR = cast::transmute(0); ptr -- cgit 1.4.1-3-g733a5 From 780afeaf0a2c063c68d91660eb15a3314f1eadcb Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 18:06:39 -0800 Subject: std: Update std::rt::thread to specify stack sizes It's now possible to spawn an OS thread with a stack that has a specific size. --- src/libstd/rt/thread.rs | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) (limited to 'src/libstd/rt') diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs index 0542c444a84..11189282f68 100644 --- a/src/libstd/rt/thread.rs +++ b/src/libstd/rt/thread.rs @@ -33,7 +33,7 @@ pub struct Thread { priv packet: ~Option, } -static DEFAULT_STACK_SIZE: libc::size_t = 1024 * 1024; +static DEFAULT_STACK_SIZE: uint = 1024 * 1024; // This is the starting point of rust os threads. The first thing we do // is make sure that we don't trigger __morestack (also why this has a @@ -84,7 +84,7 @@ impl Thread<()> { *cast::transmute::<&~Option, **mut Option>(&packet) }; let main: proc() = proc() unsafe { *packet2 = Some(main()); }; - let native = unsafe { imp::create(~main) }; + let native = unsafe { imp::create(stack, ~main) }; Thread { native: native, @@ -100,8 +100,14 @@ impl Thread<()> { /// systems. Note that platforms may not keep the main program alive even if /// there are detached thread still running around. 
pub fn spawn(main: proc()) { + Thread::spawn_stack(DEFAULT_STACK_SIZE, main) + } + + /// Performs the same functionality as `spawn`, but explicitly specifies a + /// stack size for the new thread. + pub fn spawn_stack(stack: uint, main: proc()) { unsafe { - let handle = imp::create(~main); + let handle = imp::create(stack, ~main); imp::detach(handle); } } @@ -145,13 +151,15 @@ mod imp { use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL, LPVOID, DWORD, LPDWORD, HANDLE}; use ptr; + use libc; + use cast; pub type rust_thread = HANDLE; pub type rust_thread_return = DWORD; - pub unsafe fn create(p: ~proc()) -> rust_thread { + pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread { let arg: *mut libc::c_void = cast::transmute(p); - CreateThread(ptr::mut_null(), DEFAULT_STACK_SIZE, super::thread_start, + CreateThread(ptr::mut_null(), stack as libc::size_t, super::thread_start, arg, 0, ptr::mut_null()) } @@ -189,17 +197,17 @@ mod imp { use libc::consts::os::posix01::PTHREAD_CREATE_JOINABLE; use libc; use ptr; - use super::DEFAULT_STACK_SIZE; use unstable::intrinsics; pub type rust_thread = libc::pthread_t; pub type rust_thread_return = *libc::c_void; - pub unsafe fn create(p: ~proc()) -> rust_thread { + pub unsafe fn create(stack: uint, p: ~proc()) -> rust_thread { let mut native: libc::pthread_t = intrinsics::uninit(); let mut attr: libc::pthread_attr_t = intrinsics::uninit(); assert_eq!(pthread_attr_init(&mut attr), 0); - assert_eq!(pthread_attr_setstacksize(&mut attr, DEFAULT_STACK_SIZE), 0); + assert_eq!(pthread_attr_setstacksize(&mut attr, + stack as libc::size_t), 0); assert_eq!(pthread_attr_setdetachstate(&mut attr, PTHREAD_CREATE_JOINABLE), 0); -- cgit 1.4.1-3-g733a5 From 018d60509c04cdebdf8b0d9e2b58f2604538e516 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Thu, 12 Dec 2013 21:38:57 -0800 Subject: std: Get stdtest all passing again This commit brings the library up-to-date in order to get all tests passing again --- mk/host.mk | 2 
+- mk/tests.mk | 4 +- src/driver/driver.rs | 2 + src/libgreen/lib.rs | 2 + src/libnative/task.rs | 11 +- src/libstd/any.rs | 15 +- src/libstd/comm/mod.rs | 412 +++++++++++++++------------------- src/libstd/comm/select.rs | 94 ++------ src/libstd/io/fs.rs | 87 +++---- src/libstd/io/mod.rs | 4 + src/libstd/io/net/tcp.rs | 62 +++-- src/libstd/io/net/udp.rs | 11 +- src/libstd/io/net/unix.rs | 5 +- src/libstd/io/stdio.rs | 17 +- src/libstd/io/test.rs | 44 +++- src/libstd/lib.rs | 8 +- src/libstd/local_data.rs | 1 + src/libstd/rt/local.rs | 19 +- src/libstd/rt/task.rs | 94 ++++---- src/libstd/run.rs | 4 +- src/libstd/sync/arc.rs | 1 - src/libstd/sync/mpmc_bounded_queue.rs | 12 +- src/libstd/sync/mpsc_queue.rs | 10 +- src/libstd/sync/spsc_queue.rs | 7 +- src/libstd/task.rs | 98 +++----- src/libstd/unstable/mutex.rs | 2 +- src/libstd/unstable/stack.rs | 9 +- src/libstd/unstable/sync.rs | 3 +- src/libstd/vec.rs | 1 - 29 files changed, 451 insertions(+), 590 deletions(-) (limited to 'src/libstd/rt') diff --git a/mk/host.mk b/mk/host.mk index 7aabff52bc4..f94afe587f3 100644 --- a/mk/host.mk +++ b/mk/host.mk @@ -24,7 +24,7 @@ define CP_HOST_STAGE_N # Note: $(3) and $(4) are both the same! 
$$(HBIN$(2)_H_$(4))/rustc$$(X_$(4)): \ - $$(TBIN$(1)_T_$(4)_H_$(3))/rustc$$(X_$(4)) + $$(TBIN$(1)_T_$(4)_H_$(3))/rustc$$(X_$(4)) \ $$(HLIBRUSTC_DEFAULT$(2)_H_$(4)) \ | $$(HBIN$(2)_H_$(4))/ @$$(call E, cp: $$@) diff --git a/mk/tests.mk b/mk/tests.mk index 179e41ad330..9fd9d9617c7 100644 --- a/mk/tests.mk +++ b/mk/tests.mk @@ -348,13 +348,13 @@ STDTESTDEP_$(1)_$(2)_$(3) = endif $(3)/stage$(1)/test/stdtest-$(2)$$(X_$(2)): \ - $$(STDLIB_CRATE) $$(STDLIB_INPUTS) \ + $$(STDLIB_CRATE) $$(STDLIB_INPUTS) \ $$(STDTESTDEP_$(1)_$(2)_$(3)) @$$(call E, compile_and_link: $$@) $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test $(3)/stage$(1)/test/extratest-$(2)$$(X_$(2)): \ - $$(EXTRALIB_CRATE) $$(EXTRALIB_INPUTS) \ + $$(EXTRALIB_CRATE) $$(EXTRALIB_INPUTS) \ $$(STDTESTDEP_$(1)_$(2)_$(3)) @$$(call E, compile_and_link: $$@) $$(STAGE$(1)_T_$(2)_H_$(3)) -o $$@ $$< --test diff --git a/src/driver/driver.rs b/src/driver/driver.rs index 9402578d552..8e5b6356a0b 100644 --- a/src/driver/driver.rs +++ b/src/driver/driver.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#[cfg(stage0)] extern mod green; + #[cfg(rustpkg)] extern mod this = "rustpkg"; diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index 193b64ff7e5..6530316a627 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -57,6 +57,8 @@ pub mod sleeper_list; pub mod stack; pub mod task; +#[cfg(test)] mod tests; + #[cfg(stage0)] #[lang = "start"] pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { diff --git a/src/libnative/task.rs b/src/libnative/task.rs index fa7500ca85e..782bef10c92 100644 --- a/src/libnative/task.rs +++ b/src/libnative/task.rs @@ -22,7 +22,7 @@ use std::rt::task::{Task, BlockedTask}; use std::rt::thread::Thread; use std::rt; use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT}; -use std::task::TaskOpts; +use std::task::{TaskOpts, default_task_opts}; use std::unstable::mutex::{Mutex, MUTEX_INIT}; use std::unstable::stack; @@ -73,9 +73,14 @@ pub fn new() -> ~Task { return task; } +/// Spawns a function with the default configuration +pub fn spawn(f: proc()) { + spawn_opts(default_task_opts(), f) +} + /// Spawns a new task given the configuration options and a procedure to run /// inside the task. -pub fn spawn(opts: TaskOpts, f: proc()) { +pub fn spawn_opts(opts: TaskOpts, f: proc()) { // must happen before the spawn, no need to synchronize with a lock. unsafe { THREAD_CNT.fetch_add(1, SeqCst); } @@ -238,7 +243,7 @@ impl rt::Runtime for Ops { cur_task.put_runtime(self as ~rt::Runtime); Local::put(cur_task); - task::spawn(opts, f); + task::spawn_opts(opts, f); } fn local_io<'a>(&'a mut self) -> Option> { diff --git a/src/libstd/any.rs b/src/libstd/any.rs index 49bae30a461..45a91d01b7a 100644 --- a/src/libstd/any.rs +++ b/src/libstd/any.rs @@ -119,7 +119,7 @@ impl<'a> AnyMutRefExt<'a> for &'a mut Any { /// Extension methods for a owning `Any` trait object pub trait AnyOwnExt { /// Returns the boxed value if it is of type `T`, or - /// `None` if it isn't. + /// `Err(Self)` if it isn't. 
fn move(self) -> Result<~T, Self>; } @@ -156,9 +156,8 @@ impl<'a> ToStr for &'a Any { #[cfg(test)] mod tests { + use prelude::*; use super::*; - use super::AnyRefExt; - use option::{Some, None}; #[deriving(Eq)] struct Test; @@ -385,8 +384,14 @@ mod tests { let a = ~8u as ~Any; let b = ~Test as ~Any; - assert_eq!(a.move(), Ok(~8u)); - assert_eq!(b.move(), Ok(~Test)); + match a.move::() { + Ok(a) => { assert_eq!(a, ~8u); } + Err(..) => fail!() + } + match b.move::() { + Ok(a) => { assert_eq!(a, ~Test); } + Err(..) => fail!() + } let a = ~8u as ~Any; let b = ~Test as ~Any; diff --git a/src/libstd/comm/mod.rs b/src/libstd/comm/mod.rs index f5048ec62a4..76a9e5d17e1 100644 --- a/src/libstd/comm/mod.rs +++ b/src/libstd/comm/mod.rs @@ -251,18 +251,21 @@ macro_rules! test ( mod $name { #[allow(unused_imports)]; - use util; - use super::super::*; + use native; use prelude::*; + use super::*; + use super::super::*; + use task; + use util; fn f() $b $($a)* #[test] fn uv() { f() } - $($a)* #[test] - #[ignore(cfg(windows))] // FIXME(#11003) - fn native() { - use unstable::run_in_bare_thread; - run_in_bare_thread(f); + $($a)* #[test] fn native() { + use native; + let (p, c) = Chan::new(); + do native::task::spawn { c.send(f()) } + p.recv(); } } ) @@ -889,10 +892,16 @@ impl Drop for Port { mod test { use prelude::*; - use task; - use rt::thread::Thread; + use native; + use os; use super::*; - use rt::test::*; + + pub fn stress_factor() -> uint { + match os::getenv("RUST_TEST_STRESS") { + Some(val) => from_str::(val).unwrap(), + None => 1, + } + } test!(fn smoke() { let (p, c) = Chan::new(); @@ -919,99 +928,88 @@ mod test { assert_eq!(p.recv(), 1); }) - #[test] - fn smoke_threads() { + test!(fn smoke_threads() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { c.send(1); } assert_eq!(p.recv(), 1); - } + }) - #[test] #[should_fail] - fn smoke_port_gone() { + test!(fn smoke_port_gone() { let (p, c) = Chan::new(); drop(p); c.send(1); - } + } 
#[should_fail]) - #[test] #[should_fail] - fn smoke_shared_port_gone() { + test!(fn smoke_shared_port_gone() { let (p, c) = SharedChan::new(); drop(p); c.send(1); - } + } #[should_fail]) - #[test] #[should_fail] - fn smoke_shared_port_gone2() { + test!(fn smoke_shared_port_gone2() { let (p, c) = SharedChan::new(); drop(p); let c2 = c.clone(); drop(c); c2.send(1); - } + } #[should_fail]) - #[test] #[should_fail] - fn port_gone_concurrent() { + test!(fn port_gone_concurrent() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { p.recv(); } loop { c.send(1) } - } + } #[should_fail]) - #[test] #[should_fail] - fn port_gone_concurrent_shared() { + test!(fn port_gone_concurrent_shared() { let (p, c) = SharedChan::new(); let c1 = c.clone(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { p.recv(); } loop { c.send(1); c1.send(1); } - } + } #[should_fail]) - #[test] #[should_fail] - fn smoke_chan_gone() { + test!(fn smoke_chan_gone() { let (p, c) = Chan::::new(); drop(c); p.recv(); - } + } #[should_fail]) - #[test] #[should_fail] - fn smoke_chan_gone_shared() { + test!(fn smoke_chan_gone_shared() { let (p, c) = SharedChan::<()>::new(); let c2 = c.clone(); drop(c); drop(c2); p.recv(); - } + } #[should_fail]) - #[test] #[should_fail] - fn chan_gone_concurrent() { + test!(fn chan_gone_concurrent() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { c.send(1); c.send(1); } loop { p.recv(); } - } + } #[should_fail]) - #[test] - fn stress() { + test!(fn stress() { let (p, c) = Chan::new(); - do task::spawn_sched(task::SingleThreaded) { + do spawn { for _ in range(0, 10000) { c.send(1); } } for _ in range(0, 10000) { assert_eq!(p.recv(), 1); } - } + }) - #[test] - fn stress_shared() { + test!(fn stress_shared() { static AMT: uint = 10000; static NTHREADS: uint = 8; let (p, c) = SharedChan::::new(); @@ -1027,47 +1025,53 @@ mod test { for _ in range(0, NTHREADS) { let c = c.clone(); - do 
task::spawn_sched(task::SingleThreaded) { + do spawn { for _ in range(0, AMT) { c.send(1); } } } p1.recv(); - - } + }) #[test] #[ignore(cfg(windows))] // FIXME(#11003) fn send_from_outside_runtime() { let (p, c) = Chan::::new(); let (p1, c1) = Chan::new(); + let (port, chan) = SharedChan::new(); + let chan2 = chan.clone(); do spawn { c1.send(()); for _ in range(0, 40) { assert_eq!(p.recv(), 1); } + chan2.send(()); } p1.recv(); - let t = do Thread::start { + do native::task::spawn { for _ in range(0, 40) { c.send(1); } - }; - t.join(); + chan.send(()); + } + port.recv(); + port.recv(); } #[test] #[ignore(cfg(windows))] // FIXME(#11003) fn recv_from_outside_runtime() { let (p, c) = Chan::::new(); - let t = do Thread::start { + let (dp, dc) = Chan::new(); + do native::task::spawn { for _ in range(0, 40) { assert_eq!(p.recv(), 1); } + dc.send(()); }; for _ in range(0, 40) { c.send(1); } - t.join(); + dp.recv(); } #[test] @@ -1075,173 +1079,132 @@ mod test { fn no_runtime() { let (p1, c1) = Chan::::new(); let (p2, c2) = Chan::::new(); - let t1 = do Thread::start { + let (port, chan) = SharedChan::new(); + let chan2 = chan.clone(); + do native::task::spawn { assert_eq!(p1.recv(), 1); c2.send(2); - }; - let t2 = do Thread::start { + chan2.send(()); + } + do native::task::spawn { c1.send(1); assert_eq!(p2.recv(), 2); - }; - t1.join(); - t2.join(); + chan.send(()); + } + port.recv(); + port.recv(); } - #[test] - fn oneshot_single_thread_close_port_first() { + test!(fn oneshot_single_thread_close_port_first() { // Simple test of closing without sending - do run_in_newsched_task { - let (port, _chan) = Chan::::new(); - { let _p = port; } - } - } + let (port, _chan) = Chan::::new(); + { let _p = port; } + }) - #[test] - fn oneshot_single_thread_close_chan_first() { + test!(fn oneshot_single_thread_close_chan_first() { // Simple test of closing without sending - do run_in_newsched_task { - let (_port, chan) = Chan::::new(); - { let _c = chan; } - } - } + let (_port, chan) = 
Chan::::new(); + { let _c = chan; } + }) - #[test] #[should_fail] - fn oneshot_single_thread_send_port_close() { + test!(fn oneshot_single_thread_send_port_close() { // Testing that the sender cleans up the payload if receiver is closed let (port, chan) = Chan::<~int>::new(); { let _p = port; } chan.send(~0); - } + } #[should_fail]) - #[test] - fn oneshot_single_thread_recv_chan_close() { + test!(fn oneshot_single_thread_recv_chan_close() { // Receiving on a closed chan will fail - do run_in_newsched_task { - let res = do spawntask_try { - let (port, chan) = Chan::<~int>::new(); - { let _c = chan; } - port.recv(); - }; - // What is our res? - assert!(res.is_err()); - } - } - - #[test] - fn oneshot_single_thread_send_then_recv() { - do run_in_newsched_task { + let res = do task::try { let (port, chan) = Chan::<~int>::new(); - chan.send(~10); - assert!(port.recv() == ~10); - } - } + { let _c = chan; } + port.recv(); + }; + // What is our res? + assert!(res.is_err()); + }) - #[test] - fn oneshot_single_thread_try_send_open() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - assert!(chan.try_send(10)); - assert!(port.recv() == 10); - } - } + test!(fn oneshot_single_thread_send_then_recv() { + let (port, chan) = Chan::<~int>::new(); + chan.send(~10); + assert!(port.recv() == ~10); + }) - #[test] - fn oneshot_single_thread_try_send_closed() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - { let _p = port; } - assert!(!chan.try_send(10)); - } - } + test!(fn oneshot_single_thread_try_send_open() { + let (port, chan) = Chan::::new(); + assert!(chan.try_send(10)); + assert!(port.recv() == 10); + }) - #[test] - fn oneshot_single_thread_try_recv_open() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - chan.send(10); - assert!(port.try_recv() == Some(10)); - } - } + test!(fn oneshot_single_thread_try_send_closed() { + let (port, chan) = Chan::::new(); + { let _p = port; } + assert!(!chan.try_send(10)); + }) - #[test] - fn 
oneshot_single_thread_try_recv_closed() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - { let _c = chan; } - assert!(port.recv_opt() == None); - } - } + test!(fn oneshot_single_thread_try_recv_open() { + let (port, chan) = Chan::::new(); + chan.send(10); + assert!(port.try_recv() == Some(10)); + }) - #[test] - fn oneshot_single_thread_peek_data() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - assert!(port.try_recv().is_none()); - chan.send(10); - assert!(port.try_recv().is_some()); - } - } + test!(fn oneshot_single_thread_try_recv_closed() { + let (port, chan) = Chan::::new(); + { let _c = chan; } + assert!(port.recv_opt() == None); + }) - #[test] - fn oneshot_single_thread_peek_close() { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - { let _c = chan; } - assert!(port.try_recv().is_none()); - assert!(port.try_recv().is_none()); - } - } + test!(fn oneshot_single_thread_peek_data() { + let (port, chan) = Chan::::new(); + assert!(port.try_recv().is_none()); + chan.send(10); + assert!(port.try_recv().is_some()); + }) - #[test] - fn oneshot_single_thread_peek_open() { - do run_in_newsched_task { - let (port, _) = Chan::::new(); - assert!(port.try_recv().is_none()); - } - } + test!(fn oneshot_single_thread_peek_close() { + let (port, chan) = Chan::::new(); + { let _c = chan; } + assert!(port.try_recv().is_none()); + assert!(port.try_recv().is_none()); + }) - #[test] - fn oneshot_multi_task_recv_then_send() { - do run_in_newsched_task { - let (port, chan) = Chan::<~int>::new(); - do spawntask { - assert!(port.recv() == ~10); - } + test!(fn oneshot_single_thread_peek_open() { + let (port, _) = Chan::::new(); + assert!(port.try_recv().is_none()); + }) - chan.send(~10); + test!(fn oneshot_multi_task_recv_then_send() { + let (port, chan) = Chan::<~int>::new(); + do spawn { + assert!(port.recv() == ~10); } - } - #[test] - fn oneshot_multi_task_recv_then_close() { - do run_in_newsched_task { - let (port, chan) = 
Chan::<~int>::new(); - do spawntask_later { - let _chan = chan; - } - let res = do spawntask_try { - assert!(port.recv() == ~10); - }; - assert!(res.is_err()); + chan.send(~10); + }) + + test!(fn oneshot_multi_task_recv_then_close() { + let (port, chan) = Chan::<~int>::new(); + do spawn { + let _chan = chan; } - } + let res = do task::try { + assert!(port.recv() == ~10); + }; + assert!(res.is_err()); + }) - #[test] - fn oneshot_multi_thread_close_stress() { + test!(fn oneshot_multi_thread_close_stress() { stress_factor().times(|| { - do run_in_newsched_task { - let (port, chan) = Chan::::new(); - let thread = do spawntask_thread { - let _p = port; - }; - let _chan = chan; - thread.join(); + let (port, chan) = Chan::::new(); + do spawn { + let _p = port; } + let _chan = chan; }) - } + }) - #[test] - fn oneshot_multi_thread_send_close_stress() { + test!(fn oneshot_multi_thread_send_close_stress() { stress_factor().times(|| { let (port, chan) = Chan::::new(); do spawn { @@ -1251,10 +1214,9 @@ mod test { chan.send(1); }; }) - } + }) - #[test] - fn oneshot_multi_thread_recv_close_stress() { + test!(fn oneshot_multi_thread_recv_close_stress() { stress_factor().times(|| { let (port, chan) = Chan::::new(); do spawn { @@ -1271,10 +1233,9 @@ mod test { } }; }) - } + }) - #[test] - fn oneshot_multi_thread_send_recv_stress() { + test!(fn oneshot_multi_thread_send_recv_stress() { stress_factor().times(|| { let (port, chan) = Chan::<~int>::new(); do spawn { @@ -1284,10 +1245,9 @@ mod test { assert!(port.recv() == ~10); } }) - } + }) - #[test] - fn stream_send_recv_stress() { + test!(fn stream_send_recv_stress() { stress_factor().times(|| { let (port, chan) = Chan::<~int>::new(); @@ -1297,7 +1257,7 @@ mod test { fn send(chan: Chan<~int>, i: int) { if i == 10 { return } - do spawntask_random { + do spawn { chan.send(~i); send(chan, i + 1); } @@ -1306,44 +1266,37 @@ mod test { fn recv(port: Port<~int>, i: int) { if i == 10 { return } - do spawntask_random { + do spawn { 
assert!(port.recv() == ~i); recv(port, i + 1); }; } }) - } + }) - #[test] - fn recv_a_lot() { + test!(fn recv_a_lot() { // Regression test that we don't run out of stack in scheduler context - do run_in_newsched_task { - let (port, chan) = Chan::new(); - 10000.times(|| { chan.send(()) }); - 10000.times(|| { port.recv() }); - } - } + let (port, chan) = Chan::new(); + 10000.times(|| { chan.send(()) }); + 10000.times(|| { port.recv() }); + }) - #[test] - fn shared_chan_stress() { - do run_in_mt_newsched_task { - let (port, chan) = SharedChan::new(); - let total = stress_factor() + 100; - total.times(|| { - let chan_clone = chan.clone(); - do spawntask_random { - chan_clone.send(()); - } - }); + test!(fn shared_chan_stress() { + let (port, chan) = SharedChan::new(); + let total = stress_factor() + 100; + total.times(|| { + let chan_clone = chan.clone(); + do spawn { + chan_clone.send(()); + } + }); - total.times(|| { - port.recv(); - }); - } - } + total.times(|| { + port.recv(); + }); + }) - #[test] - fn test_nested_recv_iter() { + test!(fn test_nested_recv_iter() { let (port, chan) = Chan::::new(); let (total_port, total_chan) = Chan::::new(); @@ -1360,10 +1313,9 @@ mod test { chan.send(2); drop(chan); assert_eq!(total_port.recv(), 6); - } + }) - #[test] - fn test_recv_iter_break() { + test!(fn test_recv_iter_break() { let (port, chan) = Chan::::new(); let (count_port, count_chan) = Chan::::new(); @@ -1385,5 +1337,5 @@ mod test { chan.try_send(2); drop(chan); assert_eq!(count_port.recv(), 4); - } + }) } diff --git a/src/libstd/comm/select.rs b/src/libstd/comm/select.rs index 68e1a05a653..302c9d9ea46 100644 --- a/src/libstd/comm/select.rs +++ b/src/libstd/comm/select.rs @@ -51,11 +51,11 @@ use ops::Drop; use option::{Some, None, Option}; use ptr::RawPtr; use result::{Ok, Err}; -use rt::thread::Thread; use rt::local::Local; use rt::task::Task; use super::{Packet, Port}; use sync::atomics::{Relaxed, SeqCst}; +use task; use uint; macro_rules! 
select { @@ -310,6 +310,7 @@ impl Iterator<*mut Packet> for PacketIterator { } #[cfg(test)] +#[allow(unused_imports)] mod test { use super::super::*; use prelude::*; @@ -365,19 +366,16 @@ mod test { ) }) - #[test] - fn unblocks() { - use std::io::timer; - + test!(fn unblocks() { let (mut p1, c1) = Chan::::new(); let (mut p2, _c2) = Chan::::new(); let (p3, c3) = Chan::::new(); do spawn { - timer::sleep(3); + 20.times(task::deschedule); c1.send(1); p3.recv(); - timer::sleep(3); + 20.times(task::deschedule); } select! ( @@ -389,18 +387,15 @@ mod test { a = p1.recv_opt() => { assert_eq!(a, None); }, _b = p2.recv() => { fail!() } ) - } - - #[test] - fn both_ready() { - use std::io::timer; + }) + test!(fn both_ready() { let (mut p1, c1) = Chan::::new(); let (mut p2, c2) = Chan::::new(); let (p3, c3) = Chan::<()>::new(); do spawn { - timer::sleep(3); + 20.times(task::deschedule); c1.send(1); c2.send(2); p3.recv(); @@ -414,11 +409,12 @@ mod test { a = p1.recv() => { assert_eq!(a, 1); }, a = p2.recv() => { assert_eq!(a, 2); } ) + assert_eq!(p1.try_recv(), None); + assert_eq!(p2.try_recv(), None); c3.send(()); - } + }) - #[test] - fn stress() { + test!(fn stress() { static AMT: int = 10000; let (mut p1, c1) = Chan::::new(); let (mut p2, c2) = Chan::::new(); @@ -442,69 +438,5 @@ mod test { ) c3.send(()); } - } - - #[test] - #[ignore(cfg(windows))] // FIXME(#11003) - fn stress_native() { - use std::rt::thread::Thread; - use std::unstable::run_in_bare_thread; - static AMT: int = 10000; - - do run_in_bare_thread { - let (mut p1, c1) = Chan::::new(); - let (mut p2, c2) = Chan::::new(); - let (p3, c3) = Chan::<()>::new(); - - let t = do Thread::start { - for i in range(0, AMT) { - if i % 2 == 0 { - c1.send(i); - } else { - c2.send(i); - } - p3.recv(); - } - }; - - for i in range(0, AMT) { - select! 
( - i1 = p1.recv() => { assert!(i % 2 == 0 && i == i1); }, - i2 = p2.recv() => { assert!(i % 2 == 1 && i == i2); } - ) - c3.send(()); - } - t.join(); - } - } - - #[test] - #[ignore(cfg(windows))] // FIXME(#11003) - fn native_both_ready() { - use std::rt::thread::Thread; - use std::unstable::run_in_bare_thread; - - do run_in_bare_thread { - let (mut p1, c1) = Chan::::new(); - let (mut p2, c2) = Chan::::new(); - let (p3, c3) = Chan::<()>::new(); - - let t = do Thread::start { - c1.send(1); - c2.send(2); - p3.recv(); - }; - - select! ( - a = p1.recv() => { assert_eq!(a, 1); }, - b = p2.recv() => { assert_eq!(b, 2); } - ) - select! ( - a = p1.recv() => { assert_eq!(a, 1); }, - b = p2.recv() => { assert_eq!(b, 2); } - ) - c3.send(()); - t.join(); - } - } + }) } diff --git a/src/libstd/io/fs.rs b/src/libstd/io/fs.rs index ded1d254f3f..b4838d534dc 100644 --- a/src/libstd/io/fs.rs +++ b/src/libstd/io/fs.rs @@ -714,7 +714,7 @@ mod test { } } - fn tmpdir() -> TempDir { + pub fn tmpdir() -> TempDir { use os; use rand; let ret = os::tmpdir().join(format!("rust-{}", rand::random::())); @@ -722,32 +722,7 @@ mod test { TempDir(ret) } - macro_rules! test ( - { fn $name:ident() $b:block } => ( - mod $name { - use prelude::*; - use io::{SeekSet, SeekCur, SeekEnd, io_error, Read, Open, - ReadWrite}; - use io; - use str; - use io::fs::{File, rmdir, mkdir, readdir, rmdir_recursive, - mkdir_recursive, copy, unlink, stat, symlink, link, - readlink, chmod, lstat, change_file_times}; - use io::fs::test::tmpdir; - use util; - - fn f() $b - - #[test] fn uv() { f() } - #[test] fn native() { - use rt::test::run_in_newsched_task; - run_in_newsched_task(f); - } - } - ) - ) - - test!(fn file_test_io_smoke_test() { + iotest!(fn file_test_io_smoke_test() { let message = "it's alright. 
have a good time"; let tmpdir = tmpdir(); let filename = &tmpdir.join("file_rt_io_file_test.txt"); @@ -767,7 +742,7 @@ mod test { unlink(filename); }) - test!(fn invalid_path_raises() { + iotest!(fn invalid_path_raises() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_that_does_not_exist.txt"); let mut called = false; @@ -780,7 +755,7 @@ mod test { assert!(called); }) - test!(fn file_test_iounlinking_invalid_path_should_raise_condition() { + iotest!(fn file_test_iounlinking_invalid_path_should_raise_condition() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_another_file_that_does_not_exist.txt"); let mut called = false; @@ -790,7 +765,7 @@ mod test { assert!(called); }) - test!(fn file_test_io_non_positional_read() { + iotest!(fn file_test_io_non_positional_read() { let message: &str = "ten-four"; let mut read_mem = [0, .. 8]; let tmpdir = tmpdir(); @@ -815,7 +790,7 @@ mod test { assert_eq!(read_str, message); }) - test!(fn file_test_io_seek_and_tell_smoke_test() { + iotest!(fn file_test_io_seek_and_tell_smoke_test() { let message = "ten-four"; let mut read_mem = [0, .. 
4]; let set_cursor = 4 as u64; @@ -841,7 +816,7 @@ mod test { assert_eq!(tell_pos_post_read, message.len() as u64); }) - test!(fn file_test_io_seek_and_write() { + iotest!(fn file_test_io_seek_and_write() { let initial_msg = "food-is-yummy"; let overwrite_msg = "-the-bar!!"; let final_msg = "foo-the-bar!!"; @@ -864,7 +839,7 @@ mod test { assert!(read_str == final_msg.to_owned()); }) - test!(fn file_test_io_seek_shakedown() { + iotest!(fn file_test_io_seek_shakedown() { use std::str; // 01234567890123 let initial_msg = "qwer-asdf-zxcv"; let chunk_one: &str = "qwer"; @@ -895,7 +870,7 @@ mod test { unlink(filename); }) - test!(fn file_test_stat_is_correct_on_is_file() { + iotest!(fn file_test_stat_is_correct_on_is_file() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_stat_correct_on_is_file.txt"); { @@ -908,7 +883,7 @@ mod test { unlink(filename); }) - test!(fn file_test_stat_is_correct_on_is_dir() { + iotest!(fn file_test_stat_is_correct_on_is_dir() { let tmpdir = tmpdir(); let filename = &tmpdir.join("file_stat_correct_on_is_dir"); mkdir(filename, io::UserRWX); @@ -917,7 +892,7 @@ mod test { rmdir(filename); }) - test!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() { + iotest!(fn file_test_fileinfo_false_when_checking_is_file_on_a_directory() { let tmpdir = tmpdir(); let dir = &tmpdir.join("fileinfo_false_on_dir"); mkdir(dir, io::UserRWX); @@ -925,7 +900,7 @@ mod test { rmdir(dir); }) - test!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() { + iotest!(fn file_test_fileinfo_check_exists_before_and_after_file_creation() { let tmpdir = tmpdir(); let file = &tmpdir.join("fileinfo_check_exists_b_and_a.txt"); File::create(file).write(bytes!("foo")); @@ -934,7 +909,7 @@ mod test { assert!(!file.exists()); }) - test!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() { + iotest!(fn file_test_directoryinfo_check_exists_before_and_after_mkdir() { let tmpdir = tmpdir(); let dir = 
&tmpdir.join("before_and_after_dir"); assert!(!dir.exists()); @@ -945,7 +920,7 @@ mod test { assert!(!dir.exists()); }) - test!(fn file_test_directoryinfo_readdir() { + iotest!(fn file_test_directoryinfo_readdir() { use std::str; let tmpdir = tmpdir(); let dir = &tmpdir.join("di_readdir"); @@ -976,11 +951,11 @@ mod test { rmdir(dir); }) - test!(fn recursive_mkdir_slash() { + iotest!(fn recursive_mkdir_slash() { mkdir_recursive(&Path::new("/"), io::UserRWX); }) - test!(fn unicode_path_is_dir() { + iotest!(fn unicode_path_is_dir() { assert!(Path::new(".").is_dir()); assert!(!Path::new("test/stdtest/fs.rs").is_dir()); @@ -998,7 +973,7 @@ mod test { assert!(filepath.exists()); }) - test!(fn unicode_path_exists() { + iotest!(fn unicode_path_exists() { assert!(Path::new(".").exists()); assert!(!Path::new("test/nonexistent-bogus-path").exists()); @@ -1010,7 +985,7 @@ mod test { assert!(!Path::new("test/unicode-bogus-path-각丁ー再见").exists()); }) - test!(fn copy_file_does_not_exist() { + iotest!(fn copy_file_does_not_exist() { let from = Path::new("test/nonexistent-bogus-path"); let to = Path::new("test/other-bogus-path"); match io::result(|| copy(&from, &to)) { @@ -1022,7 +997,7 @@ mod test { } }) - test!(fn copy_file_ok() { + iotest!(fn copy_file_ok() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1035,7 +1010,7 @@ mod test { assert_eq!(input.stat().perm, out.stat().perm); }) - test!(fn copy_file_dst_dir() { + iotest!(fn copy_file_dst_dir() { let tmpdir = tmpdir(); let out = tmpdir.join("out"); @@ -1045,7 +1020,7 @@ mod test { } }) - test!(fn copy_file_dst_exists() { + iotest!(fn copy_file_dst_exists() { let tmpdir = tmpdir(); let input = tmpdir.join("in"); let output = tmpdir.join("out"); @@ -1058,7 +1033,7 @@ mod test { (bytes!("foo")).to_owned()); }) - test!(fn copy_file_src_dir() { + iotest!(fn copy_file_src_dir() { let tmpdir = tmpdir(); let out = tmpdir.join("out"); @@ -1068,7 +1043,7 @@ mod test { 
assert!(!out.exists()); }) - test!(fn copy_file_preserves_perm_bits() { + iotest!(fn copy_file_preserves_perm_bits() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1083,7 +1058,7 @@ mod test { }) #[cfg(not(windows))] // FIXME(#10264) operation not permitted? - test!(fn symlinks_work() { + iotest!(fn symlinks_work() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1098,14 +1073,14 @@ mod test { }) #[cfg(not(windows))] // apparently windows doesn't like symlinks - test!(fn symlink_noexist() { + iotest!(fn symlink_noexist() { let tmpdir = tmpdir(); // symlinks can point to things that don't exist symlink(&tmpdir.join("foo"), &tmpdir.join("bar")); assert!(readlink(&tmpdir.join("bar")).unwrap() == tmpdir.join("foo")); }) - test!(fn readlink_not_symlink() { + iotest!(fn readlink_not_symlink() { let tmpdir = tmpdir(); match io::result(|| readlink(&*tmpdir)) { Ok(..) => fail!("wanted a failure"), @@ -1113,7 +1088,7 @@ mod test { } }) - test!(fn links_work() { + iotest!(fn links_work() { let tmpdir = tmpdir(); let input = tmpdir.join("in.txt"); let out = tmpdir.join("out.txt"); @@ -1139,7 +1114,7 @@ mod test { } }) - test!(fn chmod_works() { + iotest!(fn chmod_works() { let tmpdir = tmpdir(); let file = tmpdir.join("in.txt"); @@ -1156,7 +1131,7 @@ mod test { chmod(&file, io::UserFile); }) - test!(fn sync_doesnt_kill_anything() { + iotest!(fn sync_doesnt_kill_anything() { let tmpdir = tmpdir(); let path = tmpdir.join("in.txt"); @@ -1169,7 +1144,7 @@ mod test { drop(file); }) - test!(fn truncate_works() { + iotest!(fn truncate_works() { let tmpdir = tmpdir(); let path = tmpdir.join("in.txt"); @@ -1200,7 +1175,7 @@ mod test { drop(file); }) - test!(fn open_flavors() { + iotest!(fn open_flavors() { let tmpdir = tmpdir(); match io::result(|| File::open_mode(&tmpdir.join("a"), io::Open, diff --git a/src/libstd/io/mod.rs b/src/libstd/io/mod.rs index 0852c4cadb6..8481de73c7f 100644 
--- a/src/libstd/io/mod.rs +++ b/src/libstd/io/mod.rs @@ -313,6 +313,10 @@ pub use self::net::udp::UdpStream; pub use self::pipe::PipeStream; pub use self::process::Process; +/// Testing helpers +#[cfg(test)] +mod test; + /// Synchronous, non-blocking filesystem operations. pub mod fs; diff --git a/src/libstd/io/net/tcp.rs b/src/libstd/io/net/tcp.rs index bd7d8bacb38..e7787692dd2 100644 --- a/src/libstd/io/net/tcp.rs +++ b/src/libstd/io/net/tcp.rs @@ -176,7 +176,7 @@ mod test { #[test] fn smoke_test_ip4() { let addr = next_test_ip4(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -195,7 +195,7 @@ mod test { #[test] fn smoke_test_ip6() { let addr = next_test_ip6(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -214,7 +214,7 @@ mod test { #[test] fn read_eof_ip4() { let addr = next_test_ip4(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -233,7 +233,7 @@ mod test { #[test] fn read_eof_ip6() { let addr = next_test_ip6(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -252,10 +252,10 @@ mod test { #[test] fn read_eof_twice_ip4() { let addr = next_test_ip4(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { - port.take().recv(); + port.recv(); let _stream = TcpStream::connect(addr); // Close } @@ -281,7 +281,7 @@ mod test { #[test] fn read_eof_twice_ip6() { let addr = next_test_ip6(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -310,7 +310,7 @@ mod test { #[test] fn write_close_ip4() { let addr = next_test_ip4(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -342,7 +342,7 @@ mod test { #[test] fn write_close_ip6() { let addr = next_test_ip6(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -375,7 +375,7 @@ mod test { 
fn multiple_connect_serial_ip4() { let addr = next_test_ip4(); let max = 10; - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -398,7 +398,7 @@ mod test { fn multiple_connect_serial_ip6() { let addr = next_test_ip6(); let max = 10; - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -421,16 +421,15 @@ mod test { fn multiple_connect_interleaved_greedy_schedule_ip4() { let addr = next_test_ip4(); static MAX: int = 10; - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { let mut acceptor = TcpListener::bind(addr).listen(); chan.send(()); for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { - let stream = Cell::new(stream); // Start another task to handle the connection do spawn { - let mut stream = stream.take(); + let mut stream = stream; let mut buf = [0]; stream.read(buf); assert!(buf[0] == i as u8); @@ -460,15 +459,15 @@ mod test { fn multiple_connect_interleaved_greedy_schedule_ip6() { let addr = next_test_ip6(); static MAX: int = 10; - let (port, chan) = oneshot(); + let (port, chan) = Chan::<()>::new(); do spawn { let mut acceptor = TcpListener::bind(addr).listen(); + chan.send(()); for (i, stream) in acceptor.incoming().enumerate().take(MAX as uint) { - let stream = Cell::new(stream); // Start another task to handle the connection do spawn { - let mut stream = stream.take(); + let mut stream = stream; let mut buf = [0]; stream.read(buf); assert!(buf[0] == i as u8); @@ -498,16 +497,15 @@ mod test { fn multiple_connect_interleaved_lazy_schedule_ip4() { let addr = next_test_ip4(); static MAX: int = 10; - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { let mut acceptor = TcpListener::bind(addr).listen(); chan.send(()); for stream in acceptor.incoming().take(MAX as uint) { - let stream = Cell::new(stream); // Start another task to handle the connection do spawn { - let mut stream = stream.take(); + let mut stream = 
stream; let mut buf = [0]; stream.read(buf); assert!(buf[0] == 99); @@ -536,16 +534,15 @@ mod test { fn multiple_connect_interleaved_lazy_schedule_ip6() { let addr = next_test_ip6(); static MAX: int = 10; - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { let mut acceptor = TcpListener::bind(addr).listen(); chan.send(()); for stream in acceptor.incoming().take(MAX as uint) { - let stream = Cell::new(stream); // Start another task to handle the connection do spawn { - let mut stream = stream.take(); + let mut stream = stream; let mut buf = [0]; stream.read(buf); assert!(buf[0] == 99); @@ -573,23 +570,18 @@ mod test { #[cfg(test)] fn socket_name(addr: SocketAddr) { - do run_in_mt_newsched_task { - do spawntask { - let mut listener = TcpListener::bind(addr).unwrap(); - - // Make sure socket_name gives - // us the socket we binded to. - let so_name = listener.socket_name(); - assert!(so_name.is_some()); - assert_eq!(addr, so_name.unwrap()); + let mut listener = TcpListener::bind(addr).unwrap(); - } - } + // Make sure socket_name gives + // us the socket we binded to. 
+ let so_name = listener.socket_name(); + assert!(so_name.is_some()); + assert_eq!(addr, so_name.unwrap()); } #[cfg(test)] fn peer_name(addr: SocketAddr) { - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { let mut acceptor = TcpListener::bind(addr).listen(); diff --git a/src/libstd/io/net/udp.rs b/src/libstd/io/net/udp.rs index 159823ba2b5..7cb8f741cf3 100644 --- a/src/libstd/io/net/udp.rs +++ b/src/libstd/io/net/udp.rs @@ -101,6 +101,7 @@ mod test { use super::*; use io::net::ip::{Ipv4Addr, SocketAddr}; use io::*; + use io::test::*; use prelude::*; #[test] #[ignore] @@ -121,7 +122,7 @@ mod test { fn socket_smoke_test_ip4() { let server_ip = next_test_ip4(); let client_ip = next_test_ip4(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { match UdpSocket::bind(client_ip) { @@ -154,7 +155,7 @@ mod test { fn socket_smoke_test_ip6() { let server_ip = next_test_ip6(); let client_ip = next_test_ip6(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::<()>::new(); do spawn { match UdpSocket::bind(client_ip) { @@ -168,7 +169,7 @@ mod test { match UdpSocket::bind(server_ip) { Some(ref mut server) => { - chan.take().send(()); + chan.send(()); let mut buf = [0]; match server.recvfrom(buf) { Some((nread, src)) => { @@ -187,7 +188,7 @@ mod test { fn stream_smoke_test_ip4() { let server_ip = next_test_ip4(); let client_ip = next_test_ip4(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { match UdpSocket::bind(client_ip) { @@ -223,7 +224,7 @@ mod test { fn stream_smoke_test_ip6() { let server_ip = next_test_ip6(); let client_ip = next_test_ip6(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { match UdpSocket::bind(client_ip) { diff --git a/src/libstd/io/net/unix.rs b/src/libstd/io/net/unix.rs index 8fd256a22f9..59a6903adbf 100644 --- a/src/libstd/io/net/unix.rs +++ b/src/libstd/io/net/unix.rs @@ -141,11 +141,12 @@ mod tests { use prelude::*; use super::*; 
use io::*; + use io::test::*; fn smalltest(server: proc(UnixStream), client: proc(UnixStream)) { let path1 = next_test_unix(); let path2 = path1.clone(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); @@ -229,7 +230,7 @@ mod tests { let times = 10; let path1 = next_test_unix(); let path2 = path1.clone(); - let (port, chan) = oneshot(); + let (port, chan) = Chan::new(); do spawn { port.recv(); diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 88047aecda2..5249d331f72 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -308,23 +308,10 @@ impl Writer for StdWriter { #[cfg(test)] mod tests { - use super::*; - use rt::test::run_in_newsched_task; - - #[test] - fn smoke_uv() { + iotest!(fn smoke() { // Just make sure we can acquire handles stdin(); stdout(); stderr(); - } - - #[test] - fn smoke_native() { - do run_in_newsched_task { - stdin(); - stdout(); - stderr(); - } - } + }) } diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs index 212e4ebffa8..dd24150e03e 100644 --- a/src/libstd/io/test.rs +++ b/src/libstd/io/test.rs @@ -8,9 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +#[macro_escape]; + +use os; +use prelude::*; +use rand; +use rand::Rng; +use std::io::net::ip::*; +use sync::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed}; + +macro_rules! 
iotest ( + { fn $name:ident() $b:block } => ( + mod $name { + #[allow(unused_imports)]; + + use super::super::*; + use super::*; + use io; + use prelude::*; + use io::*; + use io::fs::*; + use io::net::tcp::*; + use io::net::ip::*; + use io::net::udp::*; + use io::net::unix::*; + use str; + use util; + + fn f() $b + + #[test] fn green() { f() } + #[test] fn native() { + use native; + let (p, c) = Chan::new(); + do native::task::spawn { c.send(f()) } + p.recv(); + } + } + ) +) + /// Get a port number, starting at 9600, for use in tests pub fn next_test_port() -> u16 { - use unstable::atomics::{AtomicUint, INIT_ATOMIC_UINT, Relaxed}; static mut next_offset: AtomicUint = INIT_ATOMIC_UINT; unsafe { base_port() + next_offset.fetch_add(1, Relaxed) as u16 @@ -44,9 +83,6 @@ all want to use ports. This function figures out which workspace it is running in and assigns a port range based on it. */ fn base_port() -> u16 { - use os; - use str::StrSlice; - use vec::ImmutableVector; let base = 9600u16; let range = 1000u16; diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs index 200e4e63261..4f633a63bab 100644 --- a/src/libstd/lib.rs +++ b/src/libstd/lib.rs @@ -65,13 +65,15 @@ // When testing libstd, bring in libuv as the I/O backend so tests can print // things and all of the std::io tests have an I/O interface to run on top // of -#[cfg(test)] extern mod rustuv = "rustuv#0.9-pre"; +#[cfg(test)] extern mod rustuv = "rustuv"; +#[cfg(test)] extern mod native = "native"; +#[cfg(test)] extern mod green = "green"; // Make extra accessible for benchmarking -#[cfg(test)] extern mod extra = "extra#0.9-pre"; +#[cfg(test)] extern mod extra = "extra"; // Make std testable by not duplicating lang items. 
See #2912 -#[cfg(test)] extern mod realstd = "std#0.9-pre"; +#[cfg(test)] extern mod realstd = "std"; #[cfg(test)] pub use kinds = realstd::kinds; #[cfg(test)] pub use ops = realstd::ops; #[cfg(test)] pub use cmp = realstd::cmp; diff --git a/src/libstd/local_data.rs b/src/libstd/local_data.rs index 652aa4d8198..d7e11d2f3a7 100644 --- a/src/libstd/local_data.rs +++ b/src/libstd/local_data.rs @@ -432,6 +432,7 @@ mod tests { } #[test] + #[allow(dead_code)] fn test_tls_overwrite_multiple_types() { static str_key: Key<~str> = &Key; static box_key: Key<@()> = &Key; diff --git a/src/libstd/rt/local.rs b/src/libstd/rt/local.rs index ea27956ad90..1c04b6b43ce 100644 --- a/src/libstd/rt/local.rs +++ b/src/libstd/rt/local.rs @@ -49,7 +49,6 @@ impl Local> for Task { mod test { use option::None; use unstable::run_in_bare_thread; - use rt::test::*; use super::*; use rt::task::Task; use rt::local_ptr; @@ -58,8 +57,7 @@ mod test { fn thread_local_task_smoke_test() { do run_in_bare_thread { local_ptr::init(); - let mut sched = ~new_test_uv_sched(); - let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){}); + let task = ~Task::new(); Local::put(task); let task: ~Task = Local::take(); cleanup_task(task); @@ -70,12 +68,11 @@ mod test { fn thread_local_task_two_instances() { do run_in_bare_thread { local_ptr::init(); - let mut sched = ~new_test_uv_sched(); - let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){}); + let task = ~Task::new(); Local::put(task); let task: ~Task = Local::take(); cleanup_task(task); - let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){}); + let task = ~Task::new(); Local::put(task); let task: ~Task = Local::take(); cleanup_task(task); @@ -87,8 +84,7 @@ mod test { fn borrow_smoke_test() { do run_in_bare_thread { local_ptr::init(); - let mut sched = ~new_test_uv_sched(); - let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){}); + let task = ~Task::new(); Local::put(task); unsafe { @@ -103,8 +99,7 @@ mod test { fn 
borrow_with_return() { do run_in_bare_thread { local_ptr::init(); - let mut sched = ~new_test_uv_sched(); - let task = ~Task::new_root(&mut sched.stack_pool, None, proc(){}); + let task = ~Task::new(); Local::put(task); { @@ -116,5 +111,9 @@ mod test { } } + fn cleanup_task(mut t: ~Task) { + t.destroyed = true; + } + } diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 7602d7b0564..c0164891cd4 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -176,8 +176,12 @@ impl Task { // Cleanup the dynamic borrowck debugging info borrowck::clear_task_borrow_list(); + // TODO: dox + unsafe { + let me: *mut Task = Local::unsafe_borrow(); + (*me).death.collect_failure((*me).unwinder.result()); + } let mut me: ~Task = Local::take(); - me.death.collect_failure(me.unwinder.result()); me.destroyed = true; return me; } @@ -375,92 +379,76 @@ impl Drop for Death { #[cfg(test)] mod test { use super::*; - use rt::test::*; use prelude::*; + use task; #[test] fn local_heap() { - do run_in_newsched_task() { - let a = @5; - let b = a; - assert!(*a == 5); - assert!(*b == 5); - } + let a = @5; + let b = a; + assert!(*a == 5); + assert!(*b == 5); } #[test] fn tls() { use local_data; - do run_in_newsched_task() { - local_data_key!(key: @~str) - local_data::set(key, @~"data"); - assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data"); - local_data_key!(key2: @~str) - local_data::set(key2, @~"data"); - assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data"); - } + local_data_key!(key: @~str) + local_data::set(key, @~"data"); + assert!(*local_data::get(key, |k| k.map(|k| *k)).unwrap() == ~"data"); + local_data_key!(key2: @~str) + local_data::set(key2, @~"data"); + assert!(*local_data::get(key2, |k| k.map(|k| *k)).unwrap() == ~"data"); } #[test] fn unwind() { - do run_in_newsched_task() { - let result = spawntask_try(proc()()); - rtdebug!("trying first assert"); - assert!(result.is_ok()); - let result = spawntask_try(proc() fail!()); - 
rtdebug!("trying second assert"); - assert!(result.is_err()); - } + let result = task::try(proc()()); + rtdebug!("trying first assert"); + assert!(result.is_ok()); + let result = task::try::<()>(proc() fail!()); + rtdebug!("trying second assert"); + assert!(result.is_err()); } #[test] fn rng() { - do run_in_uv_task() { - use rand::{rng, Rng}; - let mut r = rng(); - let _ = r.next_u32(); - } + use rand::{rng, Rng}; + let mut r = rng(); + let _ = r.next_u32(); } #[test] fn logging() { - do run_in_uv_task() { - info!("here i am. logging in a newsched task"); - } + info!("here i am. logging in a newsched task"); } #[test] fn comm_stream() { - do run_in_newsched_task() { - let (port, chan) = Chan::new(); - chan.send(10); - assert!(port.recv() == 10); - } + let (port, chan) = Chan::new(); + chan.send(10); + assert!(port.recv() == 10); } #[test] fn comm_shared_chan() { - do run_in_newsched_task() { - let (port, chan) = SharedChan::new(); - chan.send(10); - assert!(port.recv() == 10); - } + let (port, chan) = SharedChan::new(); + chan.send(10); + assert!(port.recv() == 10); } #[test] fn heap_cycles() { use option::{Option, Some, None}; - do run_in_newsched_task { - struct List { - next: Option<@mut List>, - } + struct List { + next: Option<@mut List>, + } - let a = @mut List { next: None }; - let b = @mut List { next: Some(a) }; + let a = @mut List { next: None }; + let b = @mut List { next: Some(a) }; - a.next = Some(b); - } + a.next = Some(b); } #[test] @@ -471,8 +459,8 @@ mod test { #[test] fn block_and_wake() { - do with_test_task |task| { - BlockedTask::block(task).wake().unwrap() - } + let task = ~Task::new(); + let mut task = BlockedTask::block(task).wake().unwrap(); + task.destroyed = true; } } diff --git a/src/libstd/run.rs b/src/libstd/run.rs index 15c0986f899..69704c855ee 100644 --- a/src/libstd/run.rs +++ b/src/libstd/run.rs @@ -426,13 +426,13 @@ mod tests { } fn writeclose(fd: c_int, s: &str) { - let mut writer = PipeStream::open(fd as int); + let mut writer = 
PipeStream::open(fd); writer.write(s.as_bytes()); } fn readclose(fd: c_int) -> ~str { let mut res = ~[]; - let mut reader = PipeStream::open(fd as int); + let mut reader = PipeStream::open(fd); let mut buf = [0, ..1024]; loop { match reader.read(buf) { diff --git a/src/libstd/sync/arc.rs b/src/libstd/sync/arc.rs index 7632ec6cf29..7b94a3acc2b 100644 --- a/src/libstd/sync/arc.rs +++ b/src/libstd/sync/arc.rs @@ -131,7 +131,6 @@ impl Drop for UnsafeArc{ mod tests { use prelude::*; use super::UnsafeArc; - use task; use mem::size_of; #[test] diff --git a/src/libstd/sync/mpmc_bounded_queue.rs b/src/libstd/sync/mpmc_bounded_queue.rs index b623976306d..fe51de4e42d 100644 --- a/src/libstd/sync/mpmc_bounded_queue.rs +++ b/src/libstd/sync/mpmc_bounded_queue.rs @@ -163,8 +163,8 @@ impl Clone for Queue { mod tests { use prelude::*; use option::*; - use task; use super::Queue; + use native; #[test] fn test() { @@ -172,14 +172,17 @@ mod tests { let nmsgs = 1000u; let mut q = Queue::with_capacity(nthreads*nmsgs); assert_eq!(None, q.pop()); + let (port, chan) = SharedChan::new(); for _ in range(0, nthreads) { let q = q.clone(); - do task::spawn_sched(task::SingleThreaded) { + let chan = chan.clone(); + do native::task::spawn { let mut q = q; for i in range(0, nmsgs) { assert!(q.push(i)); } + chan.send(()); } } @@ -188,7 +191,7 @@ mod tests { let (completion_port, completion_chan) = Chan::new(); completion_ports.push(completion_port); let q = q.clone(); - do task::spawn_sched(task::SingleThreaded) { + do native::task::spawn { let mut q = q; let mut i = 0u; loop { @@ -207,5 +210,8 @@ mod tests { for completion_port in completion_ports.mut_iter() { assert_eq!(nmsgs, completion_port.recv()); } + for _ in range(0, nthreads) { + port.recv(); + } } } diff --git a/src/libstd/sync/mpsc_queue.rs b/src/libstd/sync/mpsc_queue.rs index 89e56e3fa67..a249d6ed2e8 100644 --- a/src/libstd/sync/mpsc_queue.rs +++ b/src/libstd/sync/mpsc_queue.rs @@ -203,8 +203,8 @@ impl Consumer { mod tests { use 
prelude::*; - use task; use super::{queue, Data, Empty, Inconsistent}; + use native; #[test] fn test_full() { @@ -222,14 +222,17 @@ mod tests { Empty => {} Inconsistent | Data(..) => fail!() } + let (port, chan) = SharedChan::new(); for _ in range(0, nthreads) { let q = p.clone(); - do task::spawn_sched(task::SingleThreaded) { + let chan = chan.clone(); + do native::task::spawn { let mut q = q; for i in range(0, nmsgs) { q.push(i); } + chan.send(()); } } @@ -240,6 +243,9 @@ mod tests { Data(_) => { i += 1 } } } + for _ in range(0, nthreads) { + port.recv(); + } } } diff --git a/src/libstd/sync/spsc_queue.rs b/src/libstd/sync/spsc_queue.rs index c4abba04659..6f1b887c271 100644 --- a/src/libstd/sync/spsc_queue.rs +++ b/src/libstd/sync/spsc_queue.rs @@ -268,7 +268,7 @@ impl Drop for State { mod test { use prelude::*; use super::queue; - use task; + use native; #[test] fn smoke() { @@ -314,7 +314,8 @@ mod test { fn stress_bound(bound: uint) { let (c, mut p) = queue(bound, ()); - do task::spawn_sched(task::SingleThreaded) { + let (port, chan) = Chan::new(); + do native::task::spawn { let mut c = c; for _ in range(0, 100000) { loop { @@ -325,10 +326,12 @@ mod test { } } } + chan.send(()); } for _ in range(0, 100000) { p.push(1); } + port.recv(); } } } diff --git a/src/libstd/task.rs b/src/libstd/task.rs index 4632a3cf6e0..3b9cde5f44d 100644 --- a/src/libstd/task.rs +++ b/src/libstd/task.rs @@ -64,6 +64,7 @@ use send_str::{SendStr, IntoSendStr}; use str::Str; use util; +#[cfg(test)] use any::{AnyOwnExt, AnyRefExt}; #[cfg(test)] use comm::SharedChan; #[cfg(test)] use ptr; #[cfg(test)] use result; @@ -385,59 +386,43 @@ pub fn failing() -> bool { #[test] fn test_unnamed_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - do spawn { - with_task_name(|name| { - assert!(name.is_none()); - }) - } + do spawn { + with_task_name(|name| { + assert!(name.is_none()); + }) } } #[test] fn test_owned_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - 
let mut t = task(); - t.name(~"ada lovelace"); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } + let mut t = task(); + t.name(~"ada lovelace"); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) } } #[test] fn test_static_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name("ada lovelace"); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } + let mut t = task(); + t.name("ada lovelace"); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) } } #[test] fn test_send_named_task() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - let mut t = task(); - t.name("ada lovelace".into_send_str()); - do t.spawn { - with_task_name(|name| { - assert!(name.unwrap() == "ada lovelace"); - }) - } + let mut t = task(); + t.name("ada lovelace".into_send_str()); + do t.spawn { + with_task_name(|name| { + assert!(name.unwrap() == "ada lovelace"); + }) } } @@ -508,28 +493,19 @@ fn test_try_fail() { } } -#[cfg(test)] -fn get_sched_id() -> int { - use rt::sched::Scheduler; - let mut sched = Local::borrow(None::); - sched.get().sched_id() as int -} - #[test] fn test_spawn_sched() { + use clone::Clone; + let (po, ch) = SharedChan::new(); fn f(i: int, ch: SharedChan<()>) { - let parent_sched_id = get_sched_id(); - - do spawn_sched(SingleThreaded) { - let child_sched_id = get_sched_id(); - assert!(parent_sched_id != child_sched_id); - + let ch = ch.clone(); + do spawn { if (i == 0) { ch.send(()); } else { - f(i - 1, ch.clone()); + f(i - 1, ch); } }; @@ -542,16 +518,9 @@ fn test_spawn_sched() { fn test_spawn_sched_childs_on_default_sched() { let (po, ch) = Chan::new(); - // Assuming tests run on the default scheduler - let default_id = get_sched_id(); - - do spawn_sched(SingleThreaded) { + do spawn { let ch = ch; - let parent_sched_id = get_sched_id(); do spawn { - let 
child_sched_id = get_sched_id(); - assert!(parent_sched_id != child_sched_id); - assert_eq!(child_sched_id, default_id); ch.send(()); }; }; @@ -562,6 +531,7 @@ fn test_spawn_sched_childs_on_default_sched() { #[test] fn test_spawn_sched_blocking() { use unstable::mutex::Mutex; + use num::Times; unsafe { @@ -574,7 +544,7 @@ fn test_spawn_sched_blocking() { let mut lock = Mutex::new(); let lock2 = lock.clone(); - do spawn_sched(SingleThreaded) { + do spawn { let mut lock = lock2; lock.lock(); @@ -681,11 +651,7 @@ fn test_child_doesnt_ref_parent() { #[test] fn test_simple_newsched_spawn() { - use rt::test::run_in_uv_task; - - do run_in_uv_task { - spawn(proc()()) - } + spawn(proc()()) } #[test] diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs index eaf716f2726..5b2fac8e74e 100644 --- a/src/libstd/unstable/mutex.rs +++ b/src/libstd/unstable/mutex.rs @@ -333,12 +333,12 @@ mod test { fn somke_cond() { static mut lock: Mutex = MUTEX_INIT; unsafe { + lock.lock(); let t = do Thread::start { lock.lock(); lock.signal(); lock.unlock(); }; - lock.lock(); lock.wait(); lock.unlock(); t.join(); diff --git a/src/libstd/unstable/stack.rs b/src/libstd/unstable/stack.rs index 46a3a80be25..b8788b8c55c 100644 --- a/src/libstd/unstable/stack.rs +++ b/src/libstd/unstable/stack.rs @@ -24,11 +24,6 @@ //! detection is not guaranteed to continue in the future. Usage of this module //! is discouraged unless absolutely necessary. -use rt::task::Task; -use option::None; -use rt::local::Local; -use unstable::intrinsics; - static RED_ZONE: uint = 20 * 1024; /// This function is invoked from rust's current __morestack function. Segmented @@ -41,6 +36,10 @@ static RED_ZONE: uint = 20 * 1024; // irrelevant for documentation purposes. 
#[cfg(not(test))] // in testing, use the original libstd's version pub extern "C" fn rust_stack_exhausted() { + use rt::task::Task; + use option::None; + use rt::local::Local; + use unstable::intrinsics; unsafe { // We're calling this function because the stack just ran out. We need diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs index ad36f71cdea..687efea939b 100644 --- a/src/libstd/unstable/sync.rs +++ b/src/libstd/unstable/sync.rs @@ -161,9 +161,8 @@ impl Exclusive { mod tests { use option::*; use prelude::*; - use super::{Exclusive, UnsafeArc, atomic}; + use super::Exclusive; use task; - use mem::size_of; #[test] fn exclusive_new_arc() { diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs index 97d4c2f6d1b..86f28c28f69 100644 --- a/src/libstd/vec.rs +++ b/src/libstd/vec.rs @@ -2874,7 +2874,6 @@ impl Extendable for ~[A] { #[cfg(test)] mod tests { - use option::{None, Some}; use mem; use vec::*; use cmp::*; -- cgit 1.4.1-3-g733a5 From 3893716390f2c4857b7e8b1705a6344f96b85bb6 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 13 Dec 2013 16:26:02 -0800 Subject: Finalize the green::Pool type The scheduler pool now has a much more simplified interface. There is now a clear distinction between creating the pool and then interacting the pool. When a pool is created, all schedulers are not active, and only later if a spawn is done does activity occur. There are four operations that you can do on a pool: 1. Create a new pool. The only argument to this function is the configuration for the scheduler pool. Currently the only configuration parameter is the number of threads to initially spawn. 2. Spawn a task into this pool. This takes a procedure and task configuration options and spawns a new task into the pool of schedulers. 3. Spawn a new scheduler into the pool. This will return a handle on which to communicate with the scheduler in order to do something like a pinned task. 4. Shut down the scheduler pool. 
This will consume the scheduler pool, request all of the schedulers to shut down, and then wait on all the scheduler threads. Currently this will block the invoking OS thread, but I plan on making 'Thread::join' not a thread-blocking call. These operations can be used to encode all current usage of M:N schedulers, as well as providing a simple interface through which a pool can be modified. There is currently no way to remove a scheduler from a pool of scheduler, as there's no way to guarantee that a scheduler has exited. This may be added in the future, however (as necessary). --- src/libgreen/lib.rs | 276 +++++++++++++++++++------------------------------- src/libgreen/sched.rs | 5 + src/libgreen/task.rs | 48 +++++---- src/libnative/lib.rs | 5 +- src/libnative/task.rs | 50 +-------- src/libstd/rt/task.rs | 20 +++- src/libstd/task.rs | 31 +++--- 7 files changed, 185 insertions(+), 250 deletions(-) (limited to 'src/libstd/rt') diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index 6530316a627..f4903ea38d2 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -39,13 +39,15 @@ use std::rt::task::Task; use std::rt::rtio; use std::sync::deque; use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT}; -use std::task::TaskResult; +use std::task::TaskOpts; use std::vec; use std::util; +use stdtask = std::rt::task; -use sched::{Shutdown, Scheduler, SchedHandle}; +use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, NewNeighbor}; use sleeper_list::SleeperList; -use task::{GreenTask, HomeSched}; +use stack::StackPool; +use task::GreenTask; mod macros; @@ -103,37 +105,17 @@ pub fn start(argc: int, argv: **u8, main: proc()) -> int { /// This function will not return until all schedulers in the associated pool /// have returned. 
pub fn run(main: proc()) -> int { - let config = Config { - shutdown_after_main_exits: true, - ..Config::new() - }; - Pool::spawn(config, main).wait(); + let mut pool = Pool::new(Config::new()); + pool.spawn(TaskOpts::new(), main); + unsafe { stdtask::wait_for_completion(); } + pool.shutdown(); os::get_exit_status() } /// Configuration of how an M:N pool of schedulers is spawned. pub struct Config { - /// If this flag is set, then when schedulers are spawned via the `start` - /// and `run` functions the thread invoking `start` and `run` will have a - /// scheduler spawned on it. This scheduler will be "special" in that the - /// main task will be pinned to the scheduler and it will not participate in - /// work stealing. - /// - /// If the `spawn` function is used to create a pool of schedulers, then - /// this option has no effect. - use_main_thread: bool, - /// The number of schedulers (OS threads) to spawn into this M:N pool. threads: uint, - - /// When the main function exits, this flag will dictate whether a shutdown - /// is requested of all schedulers. If this flag is `true`, this means that - /// schedulers will shut down as soon as possible after the main task exits - /// (but some may stay alive longer for things like I/O or other tasks). - /// - /// If this flag is `false`, then no action is taken when the `main` task - /// exits. The scheduler pool is then shut down via the `wait()` function. - shutdown_after_main_exits: bool, } impl Config { @@ -141,9 +123,7 @@ impl Config { /// variables of this process. pub fn new() -> Config { Config { - use_main_thread: false, threads: rt::default_sched_threads(), - shutdown_after_main_exits: false, } } } @@ -151,8 +131,14 @@ impl Config { /// A structure representing a handle to a pool of schedulers. This handle is /// used to keep the pool alive and also reap the status from the pool. 
pub struct Pool { + priv id: uint, priv threads: ~[Thread<()>], - priv handles: Option<~[SchedHandle]>, + priv handles: ~[SchedHandle], + priv stealers: ~[deque::Stealer<~task::GreenTask>], + priv next_friend: uint, + priv stack_pool: StackPool, + priv deque_pool: deque::BufferPool<~task::GreenTask>, + priv sleepers: SleeperList, } impl Pool { @@ -160,177 +146,125 @@ impl Pool { /// /// This will configure the pool according to the `config` parameter, and /// initially run `main` inside the pool of schedulers. - pub fn spawn(config: Config, main: proc()) -> Pool { + pub fn new(config: Config) -> Pool { static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT; - let Config { - threads: nscheds, - use_main_thread: use_main_sched, - shutdown_after_main_exits - } = config; - - let mut main = Some(main); - let pool_id = unsafe { POOL_ID.fetch_add(1, SeqCst) }; + let Config { threads: nscheds } = config; + assert!(nscheds > 0); - // The shared list of sleeping schedulers. - let sleepers = SleeperList::new(); + // The pool of schedulers that will be returned from this function + let mut pool = Pool { + threads: ~[], + handles: ~[], + stealers: ~[], + id: unsafe { POOL_ID.fetch_add(1, SeqCst) }, + sleepers: SleeperList::new(), + stack_pool: StackPool::new(), + deque_pool: deque::BufferPool::new(), + next_friend: 0, + }; // Create a work queue for each scheduler, ntimes. Create an extra // for the main thread if that flag is set. We won't steal from it. - let mut pool = deque::BufferPool::new(); - let arr = vec::from_fn(nscheds, |_| pool.deque()); + let arr = vec::from_fn(nscheds, |_| pool.deque_pool.deque()); let (workers, stealers) = vec::unzip(arr.move_iter()); + pool.stealers = stealers; - // The schedulers. - let mut scheds = ~[]; - // Handles to the schedulers. When the main task ends these will be - // sent the Shutdown message to terminate the schedulers. 
- let mut handles = ~[]; - + // Now that we've got all our work queues, create one scheduler per + // queue, spawn the scheduler into a thread, and be sure to keep a + // handle to the scheduler and the thread to keep them alive. for worker in workers.move_iter() { rtdebug!("inserting a regular scheduler"); - // Every scheduler is driven by an I/O event loop. - let loop_ = new_event_loop(); - let mut sched = ~Scheduler::new(pool_id, - loop_, + let mut sched = ~Scheduler::new(pool.id, + new_event_loop(), worker, - stealers.clone(), - sleepers.clone()); - let handle = sched.make_handle(); - - scheds.push(sched); - handles.push(handle); - } - - // If we need a main-thread task then create a main thread scheduler - // that will reject any task that isn't pinned to it - let main_sched = if use_main_sched { - - // Create a friend handle. - let mut friend_sched = scheds.pop(); - let friend_handle = friend_sched.make_handle(); - scheds.push(friend_sched); - - // This scheduler needs a queue that isn't part of the stealee - // set. - let (worker, _) = pool.deque(); - - let main_loop = new_event_loop(); - let mut main_sched = ~Scheduler::new_special(pool_id, - main_loop, - worker, - stealers.clone(), - sleepers.clone(), - false, - Some(friend_handle)); - let mut main_handle = main_sched.make_handle(); - // Allow the scheduler to exit when the main task exits. - // Note: sending the shutdown message also prevents the scheduler - // from pushing itself to the sleeper list, which is used for - // waking up schedulers for work stealing; since this is a - // non-work-stealing scheduler it should not be adding itself - // to the list. - main_handle.send(Shutdown); - Some(main_sched) - } else { - None - }; - - // The pool of schedulers that will be returned from this function - let mut pool = Pool { threads: ~[], handles: None }; - - // When the main task exits, after all the tasks in the main - // task tree, shut down the schedulers and set the exit code. 
- let mut on_exit = if shutdown_after_main_exits { - let handles = handles; - Some(proc(exit_success: TaskResult) { - let mut handles = handles; - for handle in handles.mut_iter() { - handle.send(Shutdown); - } - if exit_success.is_err() { - os::set_exit_status(rt::DEFAULT_ERROR_CODE); - } - }) - } else { - pool.handles = Some(handles); - None - }; - - if !use_main_sched { - - // In the case where we do not use a main_thread scheduler we - // run the main task in one of our threads. - - let mut main = GreenTask::new(&mut scheds[0].stack_pool, None, - main.take_unwrap()); - let mut main_task = ~Task::new(); - main_task.name = Some(SendStrStatic("
")); - main_task.death.on_exit = on_exit.take(); - main.put_task(main_task); - - let sched = scheds.pop(); - let main = main; - let thread = do Thread::start { - sched.bootstrap(main); - }; - pool.threads.push(thread); - } - - // Run each remaining scheduler in a thread. - for sched in scheds.move_rev_iter() { - rtdebug!("creating regular schedulers"); - let thread = do Thread::start { + pool.stealers.clone(), + pool.sleepers.clone()); + pool.handles.push(sched.make_handle()); + let sched = sched; + pool.threads.push(do Thread::start { let mut sched = sched; let mut task = do GreenTask::new(&mut sched.stack_pool, None) { rtdebug!("boostraping a non-primary scheduler"); }; task.put_task(~Task::new()); sched.bootstrap(task); - }; - pool.threads.push(thread); + }); } - // If we do have a main thread scheduler, run it now. + return pool; + } - if use_main_sched { - rtdebug!("about to create the main scheduler task"); + pub fn shutdown(mut self) { + self.stealers = ~[]; - let mut main_sched = main_sched.unwrap(); + for mut handle in util::replace(&mut self.handles, ~[]).move_iter() { + handle.send(Shutdown); + } + for thread in util::replace(&mut self.threads, ~[]).move_iter() { + thread.join(); + } + } - let home = HomeSched(main_sched.make_handle()); - let mut main = GreenTask::new_homed(&mut main_sched.stack_pool, None, - home, main.take_unwrap()); - let mut main_task = ~Task::new(); - main_task.name = Some(SendStrStatic("
")); - main_task.death.on_exit = on_exit.take(); - main.put_task(main_task); - rtdebug!("bootstrapping main_task"); + pub fn spawn(&mut self, opts: TaskOpts, f: proc()) { + let task = GreenTask::configure(&mut self.stack_pool, opts, f); - main_sched.bootstrap(main); + // Figure out someone to send this task to + let idx = self.next_friend; + self.next_friend += 1; + if self.next_friend >= self.handles.len() { + self.next_friend = 0; } - return pool; + // Jettison the task away! + self.handles[idx].send(TaskFromFriend(task)); } - /// Waits for the pool of schedulers to exit. If the pool was spawned to - /// shutdown after the main task exits, this will simply wait for all the - /// scheudlers to exit. If the pool was not spawned like that, this function - /// will trigger shutdown of all the active schedulers. The schedulers will - /// exit once all tasks in this pool of schedulers has exited. - pub fn wait(&mut self) { - match self.handles.take() { - Some(mut handles) => { - for handle in handles.mut_iter() { - handle.send(Shutdown); - } - } - None => {} + /// Spawns a new scheduler into this M:N pool. A handle is returned to the + /// scheduler for use. The scheduler will not exit as long as this handle is + /// active. + /// + /// The scheduler spawned will participate in work stealing with all of the + /// other schedulers currently in the scheduler pool. + pub fn spawn_sched(&mut self) -> SchedHandle { + let (worker, stealer) = self.deque_pool.deque(); + self.stealers.push(stealer.clone()); + + // Tell all existing schedulers about this new scheduler so they can all + // steal work from it + for handle in self.handles.mut_iter() { + handle.send(NewNeighbor(stealer.clone())); } - for thread in util::replace(&mut self.threads, ~[]).move_iter() { - thread.join(); + // Create the new scheduler, using the same sleeper list as all the + // other schedulers as well as having a stealer handle to all other + // schedulers. 
+ let mut sched = ~Scheduler::new(self.id, + new_event_loop(), + worker, + self.stealers.clone(), + self.sleepers.clone()); + let ret = sched.make_handle(); + self.handles.push(sched.make_handle()); + let sched = sched; + self.threads.push(do Thread::start { + let mut sched = sched; + let mut task = do GreenTask::new(&mut sched.stack_pool, None) { + rtdebug!("boostraping a non-primary scheduler"); + }; + task.put_task(~Task::new()); + sched.bootstrap(task); + }); + + return ret; + } +} + +impl Drop for Pool { + fn drop(&mut self) { + if self.threads.len() > 0 { + fail!("dropping a M:N scheduler pool that wasn't shut down"); } } } diff --git a/src/libgreen/sched.rs b/src/libgreen/sched.rs index b0a49f2450a..e349ae1e601 100644 --- a/src/libgreen/sched.rs +++ b/src/libgreen/sched.rs @@ -393,6 +393,10 @@ impl Scheduler { stask.put_with_sched(self); return None; } + Some(NewNeighbor(neighbor)) => { + self.work_queues.push(neighbor); + return Some((self, stask)); + } None => { return Some((self, stask)); } @@ -831,6 +835,7 @@ type SchedulingFn = extern "Rust" fn (~Scheduler, ~GreenTask, ~GreenTask); pub enum SchedMessage { Wake, Shutdown, + NewNeighbor(deque::Stealer<~GreenTask>), PinnedTask(~GreenTask), TaskFromFriend(~GreenTask), RunOnce(~GreenTask), diff --git a/src/libgreen/task.rs b/src/libgreen/task.rs index 72e72f2cd99..e07d7f2413f 100644 --- a/src/libgreen/task.rs +++ b/src/libgreen/task.rs @@ -55,12 +55,15 @@ pub enum Home { } impl GreenTask { + /// Creates a new green task which is not homed to any particular scheduler + /// and will not have any contained Task structure. pub fn new(stack_pool: &mut StackPool, stack_size: Option, start: proc()) -> ~GreenTask { GreenTask::new_homed(stack_pool, stack_size, AnySched, start) } + /// Creates a new task (like `new`), but specifies the home for new task. 
pub fn new_homed(stack_pool: &mut StackPool, stack_size: Option, home: Home, @@ -71,6 +74,8 @@ impl GreenTask { return ops; } + /// Creates a new green task with the specified coroutine and type, this is + /// useful when creating scheduler tasks. pub fn new_typed(coroutine: Option, task_type: TaskType) -> ~GreenTask { ~GreenTask { @@ -84,6 +89,31 @@ impl GreenTask { } } + /// Creates a new green task with the given configuration options for the + /// contained Task object. The given stack pool is also used to allocate a + /// new stack for this task. + pub fn configure(pool: &mut StackPool, + opts: TaskOpts, + f: proc()) -> ~GreenTask { + let TaskOpts { + watched: _watched, + notify_chan, name, stack_size + } = opts; + + let mut green = GreenTask::new(pool, stack_size, f); + let mut task = ~Task::new(); + task.name = name; + match notify_chan { + Some(chan) => { + let on_exit = proc(task_result) { chan.send(task_result) }; + task.death.on_exit = Some(on_exit); + } + None => {} + } + green.put_task(task); + return green; + } + /// Just like the `maybe_take_runtime` function, this function should *not* /// exist. Usage of this function is _strongly_ discouraged. This is an /// absolute last resort necessary for converting a libstd task to a green @@ -367,11 +397,6 @@ impl Runtime for GreenTask { fn spawn_sibling(mut ~self, cur_task: ~Task, opts: TaskOpts, f: proc()) { self.put_task(cur_task); - let TaskOpts { - watched: _watched, - notify_chan, name, stack_size - } = opts; - // Spawns a task into the current scheduler. We allocate the new task's // stack from the scheduler's stack pool, and then configure it // accordingly to `opts`. Afterwards we bootstrap it immediately by @@ -379,18 +404,7 @@ impl Runtime for GreenTask { // // Upon returning, our task is back in TLS and we're good to return. 
let mut sched = self.sched.take_unwrap(); - let mut sibling = GreenTask::new(&mut sched.stack_pool, stack_size, f); - let mut sibling_task = ~Task::new(); - sibling_task.name = name; - match notify_chan { - Some(chan) => { - let on_exit = proc(task_result) { chan.send(task_result) }; - sibling_task.death.on_exit = Some(on_exit); - } - None => {} - } - - sibling.task = Some(sibling_task); + let sibling = GreenTask::configure(&mut sched.stack_pool, opts, f); sched.run_task(self, sibling) } diff --git a/src/libnative/lib.rs b/src/libnative/lib.rs index 40d8f6f0b46..b97d9127277 100644 --- a/src/libnative/lib.rs +++ b/src/libnative/lib.rs @@ -32,6 +32,7 @@ use std::os; use std::rt; +use stdtask = std::rt::task; pub mod io; pub mod task; @@ -81,7 +82,9 @@ pub fn start(argc: int, argv: **u8, main: proc()) -> int { pub fn run(main: proc()) -> int { // Create a task, run the procedure in it, and then wait for everything. task::run(task::new(), main); - task::wait_for_completion(); + + // Block this OS task waiting for everything to finish. + unsafe { stdtask::wait_for_completion() } os::get_exit_status() } diff --git a/src/libnative/task.rs b/src/libnative/task.rs index f0502a43990..1aa32bc8a26 100644 --- a/src/libnative/task.rs +++ b/src/libnative/task.rs @@ -21,49 +21,13 @@ use std::rt::rtio; use std::rt::task::{Task, BlockedTask}; use std::rt::thread::Thread; use std::rt; -use std::sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT}; -use std::task::{TaskOpts, default_task_opts}; -use std::unstable::mutex::{Mutex, MUTEX_INIT}; +use std::task::TaskOpts; +use std::unstable::mutex::Mutex; use std::unstable::stack; use io; use task; -static mut THREAD_CNT: AtomicUint = INIT_ATOMIC_UINT; -static mut LOCK: Mutex = MUTEX_INIT; - -/// Waits for all spawned threads to finish completion. This should only be used -/// by the main task in order to wait for all other tasks to terminate. -/// -/// This mirrors the same semantics as the green scheduling model. 
-pub fn wait_for_completion() { - static mut someone_waited: bool = false; - - unsafe { - LOCK.lock(); - assert!(!someone_waited); - someone_waited = true; - while THREAD_CNT.load(SeqCst) > 0 { - LOCK.wait(); - } - LOCK.unlock(); - LOCK.destroy(); - } - -} - -// Signal that a thread has finished execution, possibly waking up a blocker -// waiting for all threads to have finished. -fn signal_done() { - unsafe { - LOCK.lock(); - if THREAD_CNT.fetch_sub(1, SeqCst) == 1 { - LOCK.signal(); - } - LOCK.unlock(); - } -} - /// Creates a new Task which is ready to execute as a 1:1 task. pub fn new() -> ~Task { let mut task = ~Task::new(); @@ -75,15 +39,12 @@ pub fn new() -> ~Task { /// Spawns a function with the default configuration pub fn spawn(f: proc()) { - spawn_opts(default_task_opts(), f) + spawn_opts(TaskOpts::new(), f) } /// Spawns a new task given the configuration options and a procedure to run /// inside the task. pub fn spawn_opts(opts: TaskOpts, f: proc()) { - // must happen before the spawn, no need to synchronize with a lock. 
- unsafe { THREAD_CNT.fetch_add(1, SeqCst); } - let TaskOpts { watched: _watched, notify_chan, name, stack_size @@ -117,7 +78,6 @@ pub fn spawn_opts(opts: TaskOpts, f: proc()) { } run(task, f); - signal_done(); }) } @@ -290,7 +250,7 @@ mod tests { #[test] fn smoke_opts() { - let mut opts = task::default_task_opts(); + let mut opts = TaskOpts::new(); opts.name = Some(SendStrStatic("test")); opts.stack_size = Some(20 * 4096); let (p, c) = Chan::new(); @@ -301,7 +261,7 @@ mod tests { #[test] fn smoke_opts_fail() { - let mut opts = task::default_task_opts(); + let mut opts = TaskOpts::new(); let (p, c) = Chan::new(); opts.notify_chan = Some(c); spawn_opts(opts, proc() { fail!() }); diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index c0164891cd4..91e285b1061 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -33,12 +33,16 @@ use rt::rtio::LocalIo; use rt::unwind::Unwinder; use send_str::SendStr; use sync::arc::UnsafeArc; -use sync::atomics::{AtomicUint, SeqCst}; +use sync::atomics::{AtomicUint, SeqCst, INIT_ATOMIC_UINT}; use task::{TaskResult, TaskOpts}; use unstable::finally::Finally; +use unstable::mutex::{Mutex, MUTEX_INIT}; #[cfg(stage0)] pub use rt::unwind::begin_unwind; +static mut TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT; +static mut TASK_LOCK: Mutex = MUTEX_INIT; + // The Task struct represents all state associated with a rust // task. There are at this point two primary "subtypes" of task, // however instead of using a subtype we just have a "task_type" field @@ -117,6 +121,7 @@ impl Task { *cast::transmute::<&~Task, &*mut Task>(&self) }; Local::put(self); + unsafe { TASK_COUNT.fetch_add(1, SeqCst); } // The only try/catch block in the world. Attempt to run the task's // client-specified code and catch any failures. 
@@ -180,6 +185,11 @@ impl Task { unsafe { let me: *mut Task = Local::unsafe_borrow(); (*me).death.collect_failure((*me).unwinder.result()); + if TASK_COUNT.fetch_sub(1, SeqCst) == 1 { + TASK_LOCK.lock(); + TASK_LOCK.signal(); + TASK_LOCK.unlock(); + } } let mut me: ~Task = Local::take(); me.destroyed = true; @@ -376,6 +386,14 @@ impl Drop for Death { } } +pub unsafe fn wait_for_completion() { + TASK_LOCK.lock(); + while TASK_COUNT.load(SeqCst) > 0 { + TASK_LOCK.wait(); + } + TASK_LOCK.unlock(); +} + #[cfg(test)] mod test { use super::*; diff --git a/src/libstd/task.rs b/src/libstd/task.rs index 3b9cde5f44d..836390fb416 100644 --- a/src/libstd/task.rs +++ b/src/libstd/task.rs @@ -131,7 +131,7 @@ pub struct TaskBuilder { */ pub fn task() -> TaskBuilder { TaskBuilder { - opts: default_task_opts(), + opts: TaskOpts::new(), gen_body: None, can_not_copy: None, } @@ -301,22 +301,23 @@ impl TaskBuilder { } } - /* Task construction */ -pub fn default_task_opts() -> TaskOpts { - /*! - * The default task options - * - * By default all tasks are supervised by their parent, are spawned - * into the same scheduler, and do not post lifecycle notifications. - */ - - TaskOpts { - watched: true, - notify_chan: None, - name: None, - stack_size: None +impl TaskOpts { + pub fn new() -> TaskOpts { + /*! + * The default task options + * + * By default all tasks are supervised by their parent, are spawned + * into the same scheduler, and do not post lifecycle notifications. + */ + + TaskOpts { + watched: true, + notify_chan: None, + name: None, + stack_size: None + } } } -- cgit 1.4.1-3-g733a5 From 1a6d920e3df3d48168b22879a194538ec10c951a Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 13 Dec 2013 18:28:18 -0800 Subject: green: Allow specifying an IoFactory for pools This allows creation of different sched pools with different io factories. Namely, this will be used to test the basic I/O loop in the green crate. This can also be used to override the global default. 
--- src/libgreen/lib.rs | 22 ++++++++++++++++------ src/librustuv/homing.rs | 10 ++++++++-- src/librustuv/uvio.rs | 2 +- src/libstd/rt/crate_map.rs | 2 +- 4 files changed, 26 insertions(+), 10 deletions(-) (limited to 'src/libstd/rt') diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index 9a3f27f7dbc..82d5bc83e2e 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -115,6 +115,9 @@ pub fn run(main: proc()) -> int { pub struct PoolConfig { /// The number of schedulers (OS threads) to spawn into this M:N pool. threads: uint, + /// A factory function used to create new event loops. If this is not + /// specified then the default event loop factory is used. + event_loop_factory: Option ~rtio::EventLoop>, } impl PoolConfig { @@ -123,6 +126,7 @@ impl PoolConfig { pub fn new() -> PoolConfig { PoolConfig { threads: rt::default_sched_threads(), + event_loop_factory: None, } } } @@ -138,6 +142,7 @@ pub struct SchedPool { priv stack_pool: StackPool, priv deque_pool: deque::BufferPool<~task::GreenTask>, priv sleepers: SleeperList, + priv factory: fn() -> ~rtio::EventLoop, } impl SchedPool { @@ -148,7 +153,11 @@ impl SchedPool { pub fn new(config: PoolConfig) -> SchedPool { static mut POOL_ID: AtomicUint = INIT_ATOMIC_UINT; - let PoolConfig { threads: nscheds } = config; + let PoolConfig { + threads: nscheds, + event_loop_factory: factory + } = config; + let factory = factory.unwrap_or(default_event_loop_factory()); assert!(nscheds > 0); // The pool of schedulers that will be returned from this function @@ -161,6 +170,7 @@ impl SchedPool { stack_pool: StackPool::new(), deque_pool: deque::BufferPool::new(), next_friend: 0, + factory: factory, }; // Create a work queue for each scheduler, ntimes. 
Create an extra @@ -176,7 +186,7 @@ impl SchedPool { rtdebug!("inserting a regular scheduler"); let mut sched = ~Scheduler::new(pool.id, - new_event_loop(), + (pool.factory)(), worker, pool.stealers.clone(), pool.sleepers.clone()); @@ -232,7 +242,7 @@ impl SchedPool { // other schedulers as well as having a stealer handle to all other // schedulers. let mut sched = ~Scheduler::new(self.id, - new_event_loop(), + (self.factory)(), worker, self.stealers.clone(), self.sleepers.clone()); @@ -270,13 +280,13 @@ impl Drop for SchedPool { } } -fn new_event_loop() -> ~rtio::EventLoop { +fn default_event_loop_factory() -> fn() -> ~rtio::EventLoop { match crate_map::get_crate_map() { None => {} Some(map) => { match map.event_loop_factory { None => {} - Some(factory) => return factory() + Some(factory) => return factory } } } @@ -284,5 +294,5 @@ fn new_event_loop() -> ~rtio::EventLoop { // If the crate map didn't specify a factory to create an event loop, then // instead just use a basic event loop missing all I/O services to at least // get the scheduler running. 
- return basic::event_loop(); + return basic::event_loop; } diff --git a/src/librustuv/homing.rs b/src/librustuv/homing.rs index 1f9e3831e20..1ee64398ca3 100644 --- a/src/librustuv/homing.rs +++ b/src/librustuv/homing.rs @@ -161,7 +161,10 @@ mod test { #[test] fn test_homing_closes_correctly() { let (port, chan) = Chan::new(); - let mut pool = SchedPool::new(PoolConfig { threads: 1 }); + let mut pool = SchedPool::new(PoolConfig { + threads: 1, + event_loop_factory: None, + }); do pool.spawn(TaskOpts::new()) { let listener = UdpWatcher::bind(local_loop(), next_test_ip4()); @@ -179,7 +182,10 @@ mod test { #[test] fn test_homing_read() { let (port, chan) = Chan::new(); - let mut pool = SchedPool::new(PoolConfig { threads: 1 }); + let mut pool = SchedPool::new(PoolConfig { + threads: 1, + event_loop_factory: None, + }); do pool.spawn(TaskOpts::new()) { let addr1 = next_test_ip4(); diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 57bb0cfdc7a..210ee2fc451 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -96,7 +96,7 @@ impl rtio::EventLoop for UvEventLoop { #[cfg(not(test))] #[lang = "event_loop_factory"] -pub extern "C" fn new_loop() -> ~rtio::EventLoop { +pub fn new_loop() -> ~rtio::EventLoop { ~UvEventLoop::new() as ~rtio::EventLoop } diff --git a/src/libstd/rt/crate_map.rs b/src/libstd/rt/crate_map.rs index 22fc3f0ab56..d9b40cfbb6e 100644 --- a/src/libstd/rt/crate_map.rs +++ b/src/libstd/rt/crate_map.rs @@ -30,7 +30,7 @@ pub struct CrateMap<'a> { version: i32, entries: &'a [ModEntry<'a>], children: &'a [&'a CrateMap<'a>], - event_loop_factory: Option ~EventLoop>, + event_loop_factory: Option ~EventLoop>, } #[cfg(not(windows))] -- cgit 1.4.1-3-g733a5 From 282f3d99a5ad85acbc58c03b5dfcdabf649c0c85 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Fri, 13 Dec 2013 21:14:08 -0800 Subject: Test fixes and rebase problems Note that this removes a number of run-pass tests which are exercising behavior of the old runtime. 
This functionality no longer exists and is thoroughly tested inside of libgreen and libnative. There isn't really the notion of "starting the runtime" any more. The major notion now is "bootstrapping the initial task". --- Makefile.in | 6 +++ mk/target.mk | 7 +-- src/compiletest/compiletest.rs | 4 +- src/etc/licenseck.py | 6 +-- src/libgreen/lib.rs | 22 ++++++--- src/libnative/lib.rs | 2 + src/librustuv/homing.rs | 2 + src/librustuv/queue.rs | 2 + src/libstd/io/test.rs | 79 +++++++++++++++++++++++++++++++ src/libstd/rt/task.rs | 18 ++++++- src/libstd/sync/arc.rs | 2 +- src/libsyntax/ext/build.rs | 1 - src/test/run-pass/core-rt-smoke.rs | 20 -------- src/test/run-pass/native-print-no-uv.rs | 17 ------- src/test/run-pass/rt-run-twice.rs | 26 ---------- src/test/run-pass/rt-start-main-thread.rs | 21 -------- src/test/run-pass/spawning-with-debug.rs | 1 - src/test/run-pass/use.rs | 5 +- 18 files changed, 134 insertions(+), 107 deletions(-) delete mode 100644 src/test/run-pass/core-rt-smoke.rs delete mode 100644 src/test/run-pass/native-print-no-uv.rs delete mode 100644 src/test/run-pass/rt-run-twice.rs delete mode 100644 src/test/run-pass/rt-start-main-thread.rs (limited to 'src/libstd/rt') diff --git a/Makefile.in b/Makefile.in index d5a62f11e05..f1b18e8f64b 100644 --- a/Makefile.in +++ b/Makefile.in @@ -280,9 +280,15 @@ define CHECK_FOR_OLD_GLOB_MATCHES_EXCEPT endef # Same interface as above, but deletes rather than just listing the files. 
+ifdef VERBOSE define REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT $(Q)MATCHES="$(filter-out %$(3),$(wildcard $(1)/$(2)))"; if [ -n "$$MATCHES" ] ; then echo "warning: removing previous" \'$(2)\' "libraries:" $$MATCHES; rm $$MATCHES ; fi endef +else +define REMOVE_ALL_OLD_GLOB_MATCHES_EXCEPT + $(Q)MATCHES="$(filter-out %$(3),$(wildcard $(1)/$(2)))"; if [ -n "$$MATCHES" ] ; then rm $$MATCHES ; fi +endef +endif # We use a different strategy for LIST_ALL_OLD_GLOB_MATCHES_EXCEPT # than in the macros above because it needs the result of running the diff --git a/mk/target.mk b/mk/target.mk index 3746a4eafc0..db8488f792f 100644 --- a/mk/target.mk +++ b/mk/target.mk @@ -161,16 +161,13 @@ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(3)): \ $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTC_GLOB_$(2)),$$(notdir $$@)) $$(call LIST_ALL_OLD_GLOB_MATCHES_EXCEPT,$$(dir $$@),$(LIBRUSTC_RGLOB_$(2)),$$(notdir $$@)) -# NOTE: after the next snapshot remove these '-L' flags $$(TBIN$(1)_T_$(2)_H_$(3))/rustc$$(X_$(3)): \ $$(DRIVER_CRATE) \ - $$(TSREQ$(1)_T_$(2)_H_$(3)) \ + $$(SREQ$(1)_T_$(2)_H_$(3)) \ $$(TLIB$(1)_T_$(2)_H_$(3))/$(CFG_LIBRUSTC_$(3)) \ | $$(TBIN$(1)_T_$(2)_H_$(3))/ @$$(call E, compile_and_link: $$@) - $$(STAGE$(1)_T_$(2)_H_$(3)) --cfg rustc -o $$@ $$< \ - -L $$(UV_SUPPORT_DIR_$(2)) \ - -L $$(dir $$(LIBUV_LIB_$(2))) + $$(STAGE$(1)_T_$(2)_H_$(3)) --cfg rustc -o $$@ $$< ifdef CFG_ENABLE_PAX_FLAGS @$$(call E, apply PaX flags: $$@) @"$(CFG_PAXCTL)" -cm "$$@" diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs index 0fb75b7c8e0..89b6f06abfc 100644 --- a/src/compiletest/compiletest.rs +++ b/src/compiletest/compiletest.rs @@ -16,7 +16,7 @@ extern mod extra; use std::os; -use std::rt; +use std::io; use std::io::fs; use extra::getopts; @@ -234,7 +234,7 @@ pub fn run_tests(config: &config) { // sadly osx needs some file descriptor limits raised for running tests in // parallel (especially when we have lots and lots of child processes). 
// For context, see #8904 - rt::test::prepare_for_lots_of_tests(); + io::test::raise_fd_limit(); let res = test::run_tests_console(&opts, tests); if !res { fail!("Some tests failed"); } } diff --git a/src/etc/licenseck.py b/src/etc/licenseck.py index 78d0973fdfe..073322b0815 100644 --- a/src/etc/licenseck.py +++ b/src/etc/licenseck.py @@ -76,9 +76,9 @@ exceptions = [ "rt/isaac/randport.cpp", # public domain "rt/isaac/rand.h", # public domain "rt/isaac/standard.h", # public domain - "libstd/rt/mpsc_queue.rs", # BSD - "libstd/rt/spsc_queue.rs", # BSD - "libstd/rt/mpmc_bounded_queue.rs", # BSD + "libstd/sync/mpsc_queue.rs", # BSD + "libstd/sync/spsc_queue.rs", # BSD + "libstd/sync/mpmc_bounded_queue.rs", # BSD ] def check_license(name, contents): diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index bb219936ae0..57e2a0bfe16 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -17,6 +17,7 @@ //! This can be optionally linked in to rust programs in order to provide M:N //! functionality inside of 1:1 programs. +#[pkgid = "green#0.9-pre"]; #[link(name = "green", package_id = "green", vers = "0.9-pre", @@ -30,17 +31,16 @@ // NB this does *not* include globs, please keep it that way. 
#[feature(macro_rules)]; -use std::cast; use std::os; -use std::rt::thread::Thread; -use std::rt; use std::rt::crate_map; use std::rt::rtio; -use std::sync::deque; +use std::rt::thread::Thread; +use std::rt; use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT}; +use std::sync::deque; use std::task::TaskOpts; -use std::vec; use std::util; +use std::vec; use stdtask = std::rt::task; use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, NewNeighbor}; @@ -58,9 +58,9 @@ pub mod sleeper_list; pub mod stack; pub mod task; -#[cfg(stage0)] #[lang = "start"] pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { + use std::cast; do start(argc, argv) { let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; main(); @@ -103,7 +103,15 @@ pub fn start(argc: int, argv: **u8, main: proc()) -> int { /// have returned. pub fn run(main: proc()) -> int { let mut pool = SchedPool::new(PoolConfig::new()); - pool.spawn(TaskOpts::new(), main); + let (port, chan) = Chan::new(); + let mut opts = TaskOpts::new(); + opts.notify_chan = Some(chan); + pool.spawn(opts, main); + do pool.spawn(TaskOpts::new()) { + if port.recv().is_err() { + os::set_exit_status(rt::DEFAULT_ERROR_CODE); + } + } unsafe { stdtask::wait_for_completion(); } pool.shutdown(); os::get_exit_status() diff --git a/src/libnative/lib.rs b/src/libnative/lib.rs index b97d9127277..44b66a7804d 100644 --- a/src/libnative/lib.rs +++ b/src/libnative/lib.rs @@ -14,6 +14,7 @@ //! runtime. In addition, all I/O provided by this crate is the thread blocking //! version of I/O. 
+#[pkgid = "native#0.9-pre"]; #[link(name = "native", package_id = "native", vers = "0.9-pre", @@ -24,6 +25,7 @@ #[crate_type = "rlib"]; #[crate_type = "dylib"]; +// Allow check-stage0-native for now #[cfg(stage0, test)] extern mod green; // NB this crate explicitly does *not* allow glob imports, please seriously diff --git a/src/librustuv/homing.rs b/src/librustuv/homing.rs index 1ee64398ca3..d7be06724a0 100644 --- a/src/librustuv/homing.rs +++ b/src/librustuv/homing.rs @@ -31,6 +31,8 @@ //! This enqueueing is done with a concurrent queue from libstd, and the //! signalling is achieved with an async handle. +#[allow(dead_code)]; + use std::rt::local::Local; use std::rt::rtio::LocalIo; use std::rt::task::{Task, BlockedTask}; diff --git a/src/librustuv/queue.rs b/src/librustuv/queue.rs index 22e7925b211..b36bdf62775 100644 --- a/src/librustuv/queue.rs +++ b/src/librustuv/queue.rs @@ -18,6 +18,8 @@ //! event loop alive we use uv_ref and uv_unref in order to control when the //! async handle is active or not. +#[allow(dead_code)]; + use std::cast; use std::libc::{c_void, c_int}; use std::rt::task::BlockedTask; diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs index dd24150e03e..e273aedf7cc 100644 --- a/src/libstd/io/test.rs +++ b/src/libstd/io/test.rs @@ -113,3 +113,82 @@ fn base_port() -> u16 { return final_base; } + +pub fn raise_fd_limit() { + unsafe { darwin_fd_limit::raise_fd_limit() } +} + +#[cfg(target_os="macos")] +#[allow(non_camel_case_types)] +mod darwin_fd_limit { + /*! + * darwin_fd_limit exists to work around an issue where launchctl on Mac OS X defaults the + * rlimit maxfiles to 256/unlimited. The default soft limit of 256 ends up being far too low + * for our multithreaded scheduler testing, depending on the number of cores available. + * + * This fixes issue #7772. 
+ */ + + use libc; + type rlim_t = libc::uint64_t; + struct rlimit { + rlim_cur: rlim_t, + rlim_max: rlim_t + } + #[nolink] + extern { + // name probably doesn't need to be mut, but the C function doesn't specify const + fn sysctl(name: *mut libc::c_int, namelen: libc::c_uint, + oldp: *mut libc::c_void, oldlenp: *mut libc::size_t, + newp: *mut libc::c_void, newlen: libc::size_t) -> libc::c_int; + fn getrlimit(resource: libc::c_int, rlp: *mut rlimit) -> libc::c_int; + fn setrlimit(resource: libc::c_int, rlp: *rlimit) -> libc::c_int; + } + static CTL_KERN: libc::c_int = 1; + static KERN_MAXFILESPERPROC: libc::c_int = 29; + static RLIMIT_NOFILE: libc::c_int = 8; + + pub unsafe fn raise_fd_limit() { + // The strategy here is to fetch the current resource limits, read the kern.maxfilesperproc + // sysctl value, and bump the soft resource limit for maxfiles up to the sysctl value. + use ptr::{to_unsafe_ptr, to_mut_unsafe_ptr, mut_null}; + use mem::size_of_val; + use os::last_os_error; + + // Fetch the kern.maxfilesperproc value + let mut mib: [libc::c_int, ..2] = [CTL_KERN, KERN_MAXFILESPERPROC]; + let mut maxfiles: libc::c_int = 0; + let mut size: libc::size_t = size_of_val(&maxfiles) as libc::size_t; + if sysctl(to_mut_unsafe_ptr(&mut mib[0]), 2, + to_mut_unsafe_ptr(&mut maxfiles) as *mut libc::c_void, + to_mut_unsafe_ptr(&mut size), + mut_null(), 0) != 0 { + let err = last_os_error(); + error!("raise_fd_limit: error calling sysctl: {}", err); + return; + } + + // Fetch the current resource limits + let mut rlim = rlimit{rlim_cur: 0, rlim_max: 0}; + if getrlimit(RLIMIT_NOFILE, to_mut_unsafe_ptr(&mut rlim)) != 0 { + let err = last_os_error(); + error!("raise_fd_limit: error calling getrlimit: {}", err); + return; + } + + // Bump the soft limit to the smaller of kern.maxfilesperproc and the hard limit + rlim.rlim_cur = ::cmp::min(maxfiles as rlim_t, rlim.rlim_max); + + // Set our newly-increased resource limit + if setrlimit(RLIMIT_NOFILE, to_unsafe_ptr(&rlim)) != 0 { + 
let err = last_os_error(); + error!("raise_fd_limit: error calling setrlimit: {}", err); + return; + } + } +} + +#[cfg(not(target_os="macos"))] +mod darwin_fd_limit { + pub unsafe fn raise_fd_limit() {} +} diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 91e285b1061..c0e1086483d 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -38,8 +38,13 @@ use task::{TaskResult, TaskOpts}; use unstable::finally::Finally; use unstable::mutex::{Mutex, MUTEX_INIT}; -#[cfg(stage0)] pub use rt::unwind::begin_unwind; +#[cfg(stage0)] +pub use rt::unwind::begin_unwind; +// These two statics are used as bookkeeping to keep track of the rust runtime's +// count of threads. In 1:1 contexts, this is used to know when to return from +// the main function, and in M:N contexts this is used to know when to shut down +// the pool of schedulers. static mut TASK_COUNT: AtomicUint = INIT_ATOMIC_UINT; static mut TASK_LOCK: Mutex = MUTEX_INIT; @@ -181,10 +186,15 @@ impl Task { // Cleanup the dynamic borrowck debugging info borrowck::clear_task_borrow_list(); - // TODO: dox + // Here we must unsafely borrow the task in order to not remove it from + // TLS. When collecting failure, we may attempt to send on a channel (or + // just run arbitrary code), so we must be sure to still have a local + // task in TLS. unsafe { let me: *mut Task = Local::unsafe_borrow(); (*me).death.collect_failure((*me).unwinder.result()); + + // see comments on these statics for why they're used if TASK_COUNT.fetch_sub(1, SeqCst) == 1 { TASK_LOCK.lock(); TASK_LOCK.signal(); @@ -386,6 +396,10 @@ impl Drop for Death { } } +/// The main function of all rust executables will by default use this function. +/// This function will *block* the OS thread (hence the `unsafe`) waiting for +/// all known tasks to complete. Once this function has returned, it is +/// guaranteed that no more user-defined code is still running. 
pub unsafe fn wait_for_completion() { TASK_LOCK.lock(); while TASK_COUNT.load(SeqCst) > 0 { diff --git a/src/libstd/sync/arc.rs b/src/libstd/sync/arc.rs index 7b94a3acc2b..b405104c09a 100644 --- a/src/libstd/sync/arc.rs +++ b/src/libstd/sync/arc.rs @@ -32,7 +32,7 @@ use vec; /// An atomically reference counted pointer. /// /// Enforces no shared-memory safety. -#[unsafe_no_drop_flag] +//#[unsafe_no_drop_flag] FIXME: #9758 pub struct UnsafeArc { priv data: *mut ArcData, } diff --git a/src/libsyntax/ext/build.rs b/src/libsyntax/ext/build.rs index 930d25e7443..aa7e0d0eced 100644 --- a/src/libsyntax/ext/build.rs +++ b/src/libsyntax/ext/build.rs @@ -606,7 +606,6 @@ impl AstBuilder for @ExtCtxt { ~[ self.ident_of("std"), self.ident_of("rt"), - self.ident_of("task"), self.ident_of("begin_unwind"), ], ~[ diff --git a/src/test/run-pass/core-rt-smoke.rs b/src/test/run-pass/core-rt-smoke.rs deleted file mode 100644 index 6e3d9629da0..00000000000 --- a/src/test/run-pass/core-rt-smoke.rs +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// xfail-fast - -// A simple test of starting the runtime manually - -#[start] -fn start(argc: int, argv: **u8) -> int { - do std::rt::start(argc, argv) { - info!("creating my own runtime is joy"); - } -} diff --git a/src/test/run-pass/native-print-no-uv.rs b/src/test/run-pass/native-print-no-uv.rs deleted file mode 100644 index d3b6d605984..00000000000 --- a/src/test/run-pass/native-print-no-uv.rs +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// xfail-fast - -#[no_uv]; - -fn main() { - println!("hello"); -} diff --git a/src/test/run-pass/rt-run-twice.rs b/src/test/run-pass/rt-run-twice.rs deleted file mode 100644 index a9a26c2fbb1..00000000000 --- a/src/test/run-pass/rt-run-twice.rs +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// xfail-fast make-check does not like `#[start]` - -use std::rt; - -#[start] -fn start(argc: int, argv: **u8) -> int { - do rt::start(argc, argv) { - println("First invocation"); - }; - - do rt::start(argc, argv) { - println("Second invocation"); - }; - - 0 -} diff --git a/src/test/run-pass/rt-start-main-thread.rs b/src/test/run-pass/rt-start-main-thread.rs deleted file mode 100644 index 47a723ce6e1..00000000000 --- a/src/test/run-pass/rt-start-main-thread.rs +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. 
- -// xfail-fast - -#[start] -fn start(argc: int, argv: **u8) -> int { - do std::rt::start_on_main_thread(argc, argv) { - info!("running on main thread"); - do spawn { - info!("running on another thread"); - } - } -} diff --git a/src/test/run-pass/spawning-with-debug.rs b/src/test/run-pass/spawning-with-debug.rs index 76975d15c1d..f8094f9fdb9 100644 --- a/src/test/run-pass/spawning-with-debug.rs +++ b/src/test/run-pass/spawning-with-debug.rs @@ -17,6 +17,5 @@ use std::task; fn main() { let mut t = task::task(); - t.sched_mode(task::SingleThreaded); t.spawn(proc() ()); } diff --git a/src/test/run-pass/use.rs b/src/test/run-pass/use.rs index ddd4b10fd5c..56ce5397efb 100644 --- a/src/test/run-pass/use.rs +++ b/src/test/run-pass/use.rs @@ -10,6 +10,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. +// xfail-fast + #[allow(unused_imports)]; #[no_std]; @@ -25,4 +27,5 @@ mod baz { pub use x = std::str; } -pub fn main() { } +#[start] +pub fn start(_: int, _: **u8) -> int { 3 } -- cgit 1.4.1-3-g733a5 From 51c03c1f35f6b076928a1e5b94ec81e6d00c3ac2 Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Sun, 15 Dec 2013 00:42:21 -0800 Subject: green: Properly wait for main before shutdown There was a race in the code previously where schedulers would *immediately* shut down after spawning the main task (because the global task count would still be 0). This fixes the logic by blocking the sched pool task in receiving on a port instead of spawning a task into the pool to receive on a port. The modifications necessary were to have a "simple task" running by the time the code is executing, but this is a simple enough thing to implement and I foresee this being necessary to have implemented in the future anyway. 
--- src/libgreen/lib.rs | 49 ++++++++++++++++++++++---------- src/libgreen/simple.rs | 77 ++++++++++++++++++++++++++++++++++++++++++++++++++ src/libnative/lib.rs | 25 +++++++++------- src/libnative/task.rs | 10 ++----- src/libstd/rt/task.rs | 27 ++++++++++-------- 5 files changed, 143 insertions(+), 45 deletions(-) create mode 100644 src/libgreen/simple.rs (limited to 'src/libstd/rt') diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index 57e2a0bfe16..7318eaaf679 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -33,7 +33,9 @@ use std::os; use std::rt::crate_map; +use std::rt::local::Local; use std::rt::rtio; +use std::rt::task::Task; use std::rt::thread::Thread; use std::rt; use std::sync::atomics::{SeqCst, AtomicUint, INIT_ATOMIC_UINT}; @@ -41,7 +43,6 @@ use std::sync::deque; use std::task::TaskOpts; use std::util; use std::vec; -use stdtask = std::rt::task; use sched::{Shutdown, Scheduler, SchedHandle, TaskFromFriend, NewNeighbor}; use sleeper_list::SleeperList; @@ -49,6 +50,7 @@ use stack::StackPool; use task::GreenTask; mod macros; +mod simple; pub mod basic; pub mod context; @@ -61,16 +63,20 @@ pub mod task; #[lang = "start"] pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { use std::cast; - do start(argc, argv) { - let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; - main(); - } + let mut ret = None; + simple::task().run(|| { + ret = Some(do start(argc, argv) { + let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; + main(); + }) + }); + ret.unwrap() } /// Set up a default runtime configuration, given compiler-supplied arguments. /// -/// This function will block the current thread of execution until the entire -/// pool of M:N schedulers have exited. +/// This function will block until the entire pool of M:N schedulers have +/// exited. This function also requires a local task to be available. 
/// /// # Arguments /// @@ -95,24 +101,37 @@ pub fn start(argc: int, argv: **u8, main: proc()) -> int { /// Execute the main function in a pool of M:N schedulers. /// -/// Configures the runtime according to the environment, by default -/// using a task scheduler with the same number of threads as cores. -/// Returns a process exit code. +/// Configures the runtime according to the environment, by default using a task +/// scheduler with the same number of threads as cores. Returns a process exit +/// code. /// /// This function will not return until all schedulers in the associated pool /// have returned. pub fn run(main: proc()) -> int { + // Create a scheduler pool and spawn the main task into this pool. We will + // get notified over a channel when the main task exits. let mut pool = SchedPool::new(PoolConfig::new()); let (port, chan) = Chan::new(); let mut opts = TaskOpts::new(); opts.notify_chan = Some(chan); pool.spawn(opts, main); - do pool.spawn(TaskOpts::new()) { - if port.recv().is_err() { - os::set_exit_status(rt::DEFAULT_ERROR_CODE); - } + + // Wait for the main task to return, and set the process error code + // appropriately. + if port.recv().is_err() { + os::set_exit_status(rt::DEFAULT_ERROR_CODE); } - unsafe { stdtask::wait_for_completion(); } + + // Once the main task has exited and we've set our exit code, wait for all + // spawned sub-tasks to finish running. This is done to allow all schedulers + // to remain active while there are still tasks possibly running. + unsafe { + let mut task = Local::borrow(None::); + task.get().wait_for_other_tasks(); + } + + // Now that we're sure all tasks are dead, shut down the pool of schedulers, + // waiting for them all to return. pool.shutdown(); os::get_exit_status() } diff --git a/src/libgreen/simple.rs b/src/libgreen/simple.rs new file mode 100644 index 00000000000..6fd2c436b2e --- /dev/null +++ b/src/libgreen/simple.rs @@ -0,0 +1,77 @@ +// Copyright 2013 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! A small module implementing a simple "runtime" used for bootstrapping a rust +//! scheduler pool and then interacting with it. + +use std::cast; +use std::rt::Runtime; +use std::task::TaskOpts; +use std::rt::rtio; +use std::rt::local::Local; +use std::rt::task::{Task, BlockedTask}; +use std::unstable::sync::LittleLock; + +struct SimpleTask { + lock: LittleLock, +} + +impl Runtime for SimpleTask { + // Implement the simple tasks of descheduling and rescheduling, but only in + // a simple number of cases. + fn deschedule(mut ~self, times: uint, mut cur_task: ~Task, + f: |BlockedTask| -> Result<(), BlockedTask>) { + assert!(times == 1); + + let my_lock: *mut LittleLock = &mut self.lock; + cur_task.put_runtime(self as ~Runtime); + + unsafe { + let cur_task_dupe = *cast::transmute::<&~Task, &uint>(&cur_task); + let task = BlockedTask::block(cur_task); + + let mut guard = (*my_lock).lock(); + match f(task) { + Ok(()) => guard.wait(), + Err(task) => { cast::forget(task.wake()); } + } + drop(guard); + cur_task = cast::transmute::(cur_task_dupe); + } + Local::put(cur_task); + } + fn reawaken(mut ~self, mut to_wake: ~Task) { + let lock: *mut LittleLock = &mut self.lock; + to_wake.put_runtime(self as ~Runtime); + unsafe { + cast::forget(to_wake); + let _l = (*lock).lock(); + (*lock).signal(); + } + } + + // These functions are all unimplemented and fail as a result. This is on + // purpose. A "simple task" is just that, a very simple task that can't + // really do a whole lot. The only purpose of the task is to get us off our + // feet and running. 
+ fn yield_now(~self, _cur_task: ~Task) { fail!() } + fn maybe_yield(~self, _cur_task: ~Task) { fail!() } + fn spawn_sibling(~self, _cur_task: ~Task, _opts: TaskOpts, _f: proc()) { + fail!() + } + fn local_io<'a>(&'a mut self) -> Option> { None } + fn wrap(~self) -> ~Any { fail!() } +} + +pub fn task() -> ~Task { + let mut task = ~Task::new(); + task.put_runtime(~SimpleTask { lock: LittleLock::new() } as ~Runtime); + return task; +} diff --git a/src/libnative/lib.rs b/src/libnative/lib.rs index 44b66a7804d..60ae239ee97 100644 --- a/src/libnative/lib.rs +++ b/src/libnative/lib.rs @@ -33,15 +33,16 @@ // answer is that you don't need them) use std::os; +use std::rt::local::Local; +use std::rt::task::Task; use std::rt; -use stdtask = std::rt::task; pub mod io; pub mod task; // XXX: this should not exist here -#[cfg(stage0, notready)] +#[cfg(stage0)] #[lang = "start"] pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { use std::cast; @@ -72,9 +73,13 @@ pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { /// exited. pub fn start(argc: int, argv: **u8, main: proc()) -> int { rt::init(argc, argv); - let exit_code = run(main); + let mut exit_code = None; + let mut main = Some(main); + task::new().run(|| { + exit_code = Some(run(main.take_unwrap())); + }); unsafe { rt::cleanup(); } - return exit_code; + return exit_code.unwrap(); } /// Executes a procedure on the current thread in a Rust task context. @@ -82,11 +87,11 @@ pub fn start(argc: int, argv: **u8, main: proc()) -> int { /// This function has all of the same details as `start` except for a different /// number of arguments. pub fn run(main: proc()) -> int { - // Create a task, run the procedure in it, and then wait for everything. - task::run(task::new(), main); - - // Block this OS task waiting for everything to finish. 
- unsafe { stdtask::wait_for_completion() } - + // Run the main procedure and then wait for everything to finish + main(); + unsafe { + let mut task = Local::borrow(None::); + task.get().wait_for_other_tasks(); + } os::get_exit_status() } diff --git a/src/libnative/task.rs b/src/libnative/task.rs index 48768def067..0d5e08979ca 100644 --- a/src/libnative/task.rs +++ b/src/libnative/task.rs @@ -77,17 +77,11 @@ pub fn spawn_opts(opts: TaskOpts, f: proc()) { stack::record_stack_bounds(my_stack - stack + 1024, my_stack); } - run(task, f); + let mut f = Some(f); + task.run(|| { f.take_unwrap()() }); }) } -/// Runs a task once, consuming the task. The given procedure is run inside of -/// the task. -pub fn run(t: ~Task, f: proc()) { - let mut f = Some(f); - t.run(|| { f.take_unwrap()(); }); -} - // This structure is the glue between channels and the 1:1 scheduling mode. This // structure is allocated once per task. struct Ops { diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index c0e1086483d..765f0b427cd 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -292,6 +292,21 @@ impl Task { pub fn local_io<'a>(&'a mut self) -> Option> { self.imp.get_mut_ref().local_io() } + + /// The main function of all rust executables will by default use this + /// function. This function will *block* the OS thread (hence the `unsafe`) + /// waiting for all known tasks to complete. Once this function has + /// returned, it is guaranteed that no more user-defined code is still + /// running. + pub unsafe fn wait_for_other_tasks(&mut self) { + TASK_COUNT.fetch_sub(1, SeqCst); // don't count ourselves + TASK_LOCK.lock(); + while TASK_COUNT.load(SeqCst) > 0 { + TASK_LOCK.wait(); + } + TASK_LOCK.unlock(); + TASK_COUNT.fetch_add(1, SeqCst); // add ourselves back in + } } impl Drop for Task { @@ -396,18 +411,6 @@ impl Drop for Death { } } -/// The main function of all rust executables will by default use this function. 
-/// This function will *block* the OS thread (hence the `unsafe`) waiting for -/// all known tasks to complete. Once this function has returned, it is -/// guaranteed that no more user-defined code is still running. -pub unsafe fn wait_for_completion() { - TASK_LOCK.lock(); - while TASK_COUNT.load(SeqCst) > 0 { - TASK_LOCK.wait(); - } - TASK_LOCK.unlock(); -} - #[cfg(test)] mod test { use super::*; -- cgit 1.4.1-3-g733a5 From 1c4af5e3d93fe2953c31f8a76ee2aed15069204a Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Wed, 18 Dec 2013 10:14:44 -0800 Subject: rustuv: Remove the id() function from IoFactory The only user of this was the homing code in librustuv, and it just manually does the cast from a pointer to a uint now. --- src/libnative/io/mod.rs | 3 --- src/librustuv/homing.rs | 38 ++++++++++++++++++++------------------ src/librustuv/lib.rs | 7 ++----- src/librustuv/uvio.rs | 7 ++++--- src/librustuv/uvll.rs | 2 +- src/libstd/rt/rtio.rs | 2 -- 6 files changed, 27 insertions(+), 32 deletions(-) (limited to 'src/libstd/rt') diff --git a/src/libnative/io/mod.rs b/src/libnative/io/mod.rs index 36e3f8af190..32056215e7c 100644 --- a/src/libnative/io/mod.rs +++ b/src/libnative/io/mod.rs @@ -111,9 +111,6 @@ fn mkerr_winbool(ret: libc::c_int) -> IoResult<()> { pub struct IoFactory; impl rtio::IoFactory for IoFactory { - // all native io factories are the same - fn id(&self) -> uint { 0 } - // networking fn tcp_connect(&mut self, _addr: SocketAddr) -> IoResult<~RtioTcpStream> { Err(unimpl()) diff --git a/src/librustuv/homing.rs b/src/librustuv/homing.rs index d7be06724a0..16534b7b38b 100644 --- a/src/librustuv/homing.rs +++ b/src/librustuv/homing.rs @@ -33,6 +33,7 @@ #[allow(dead_code)]; +use std::cast; use std::rt::local::Local; use std::rt::rtio::LocalIo; use std::rt::task::{Task, BlockedTask}; @@ -70,6 +71,17 @@ impl Clone for HomeHandle { } } +pub fn local_id() -> uint { + let mut io = match LocalIo::borrow() { + Some(io) => io, None => return 0, + }; + let io = 
io.get(); + unsafe { + let (_vtable, ptr): (uint, uint) = cast::transmute(io); + return ptr; + } +} + pub trait HomingIO { fn home<'r>(&'r mut self) -> &'r mut HomeHandle; @@ -79,35 +91,26 @@ pub trait HomingIO { fn go_to_IO_home(&mut self) -> uint { let _f = ForbidUnwind::new("going home"); - let mut cur_task: ~Task = Local::take(); - let cur_loop_id = { - let mut io = cur_task.local_io().expect("libuv must have I/O"); - io.get().id() - }; + let cur_loop_id = local_id(); + let destination = self.home().id; // Try at all costs to avoid the homing operation because it is quite // expensive. Hence, we only deschedule/send if we're not on the correct // event loop. If we're already on the home event loop, then we're good // to go (remember we have no preemption, so we're guaranteed to stay on // this event loop as long as we avoid the scheduler). - if cur_loop_id != self.home().id { + if cur_loop_id != destination { + let cur_task: ~Task = Local::take(); cur_task.deschedule(1, |task| { self.home().send(task); Ok(()) }); // Once we wake up, assert that we're in the right location - let cur_loop_id = { - let mut io = LocalIo::borrow().expect("libuv must have I/O"); - io.get().id() - }; - assert_eq!(cur_loop_id, self.home().id); - - cur_loop_id - } else { - Local::put(cur_task); - cur_loop_id + assert_eq!(local_id(), destination); } + + return destination; } /// Fires a single homing missile, returning another missile targeted back @@ -130,8 +133,7 @@ impl HomingMissile { /// Check at runtime that the task has *not* transplanted itself to a /// different I/O loop while executing. 
pub fn check(&self, msg: &'static str) { - let mut io = LocalIo::borrow().expect("libuv must have I/O"); - assert!(io.get().id() == self.io_home, "{}", msg); + assert!(local_id() == self.io_home, "{}", msg); } } diff --git a/src/librustuv/lib.rs b/src/librustuv/lib.rs index 49d695ea3fb..2ef10dd33ac 100644 --- a/src/librustuv/lib.rs +++ b/src/librustuv/lib.rs @@ -53,7 +53,6 @@ use std::ptr::null; use std::ptr; use std::rt::local::Local; use std::rt::task::{BlockedTask, Task}; -use std::rt::rtio::LocalIo; use std::str::raw::from_c_str; use std::str; use std::task; @@ -161,18 +160,16 @@ pub struct ForbidSwitch { impl ForbidSwitch { fn new(s: &'static str) -> ForbidSwitch { - let mut io = LocalIo::borrow().expect("libuv must have local I/O"); ForbidSwitch { msg: s, - io: io.get().id(), + io: homing::local_id(), } } } impl Drop for ForbidSwitch { fn drop(&mut self) { - let mut io = LocalIo::borrow().expect("libuv must have local I/O"); - assert!(self.io == io.get().id(), + assert!(self.io == homing::local_id(), "didnt want a scheduler switch: {}", self.msg); } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 322bead8be4..9e7343aa2da 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -132,13 +132,14 @@ impl UvIoFactory { pub fn uv_loop<'a>(&mut self) -> *uvll::uv_loop_t { self.loop_.handle } pub fn make_handle(&mut self) -> HomeHandle { - HomeHandle::new(self.id(), &mut **self.handle_pool.get_mut_ref()) + // It's understood by the homing code that the "local id" is just the + // pointer of the local I/O factory cast to a uint. + let id: uint = unsafe { cast::transmute_copy(&self) }; + HomeHandle::new(id, &mut **self.handle_pool.get_mut_ref()) } } impl IoFactory for UvIoFactory { - fn id(&self) -> uint { unsafe { cast::transmute(self) } } - // Connect to an address and return a new stream // NB: This blocks the task waiting on the connection. 
// It would probably be better to return a future diff --git a/src/librustuv/uvll.rs b/src/librustuv/uvll.rs index fa0bb85faed..ad5fad99f20 100644 --- a/src/librustuv/uvll.rs +++ b/src/librustuv/uvll.rs @@ -38,7 +38,7 @@ use std::libc; use std::libc::uintptr_t; pub use self::errors::{EACCES, ECONNREFUSED, ECONNRESET, EPIPE, ECONNABORTED, - ECANCELED, EBADF, ENOTCONN}; + ECANCELED, EBADF, ENOTCONN, ENOENT}; pub static OK: c_int = 0; pub static EOF: c_int = -4095; diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index c1c40cc6dff..97b08cc18ca 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -150,8 +150,6 @@ impl<'a> LocalIo<'a> { } pub trait IoFactory { - fn id(&self) -> uint; - // networking fn tcp_connect(&mut self, addr: SocketAddr) -> Result<~RtioTcpStream, IoError>; fn tcp_bind(&mut self, addr: SocketAddr) -> Result<~RtioTcpListener, IoError>; -- cgit 1.4.1-3-g733a5 From 6cad8f4f14da1dd529100779db74b03d6db20faf Mon Sep 17 00:00:00 2001 From: Alex Crichton Date: Wed, 18 Dec 2013 09:57:58 -0800 Subject: Test fixes and rebase conflicts * vec::raw::to_ptr is gone * Pausible => Pausable * Removing @ * Calling the main task "
" * Removing unused imports * Removing unused mut * Bringing some libextra tests up to date * Allowing compiletest to work at stage0 * Fixing the bootstrap-from-c rmake tests * assert => rtassert in a few cases * printing to stderr instead of stdout in fail!() --- src/compiletest/compiletest.rs | 1 + src/libextra/comm.rs | 11 +- src/libextra/sync.rs | 32 ++-- src/libextra/task_pool.rs | 2 - src/libgreen/basic.rs | 2 +- src/libgreen/lib.rs | 29 ++- src/libgreen/macros.rs | 3 +- src/libgreen/sched.rs | 7 +- src/libnative/io/process.rs | 3 +- src/libnative/lib.rs | 13 +- src/librustc/back/link.rs | 12 +- src/librustpkg/tests.rs | 9 +- src/librustuv/file.rs | 1 - src/librustuv/idle.rs | 2 +- src/librustuv/macros.rs | 3 +- src/librustuv/signal.rs | 2 +- src/librustuv/timer.rs | 2 +- src/librustuv/uvio.rs | 6 +- src/libstd/io/net/unix.rs | 3 +- src/libstd/io/stdio.rs | 3 +- src/libstd/io/test.rs | 1 + src/libstd/rt/local_ptr.rs | 6 +- src/libstd/rt/mod.rs | 1 + src/libstd/rt/rtio.rs | 18 +- src/libstd/rt/task.rs | 8 +- src/libstd/rt/thread.rs | 4 - src/libstd/rt/unwind.rs | 202 ++++++++++++--------- src/libstd/rt/util.rs | 3 +- src/libstd/sync/arc.rs | 2 +- src/libstd/unstable/stack.rs | 2 + src/test/bench/rt-messaging-ping-pong.rs | 6 +- src/test/bench/rt-parfib.rs | 3 +- src/test/bench/shootout-spectralnorm.rs | 2 + src/test/compile-fail/std-uncopyable-atomics.rs | 2 +- .../run-make/bootstrap-from-c-with-green/Makefile | 9 + .../run-make/bootstrap-from-c-with-green/lib.rs | 25 +++ .../run-make/bootstrap-from-c-with-green/main.c | 16 ++ .../run-make/bootstrap-from-c-with-native/Makefile | 9 + .../run-make/bootstrap-from-c-with-native/lib.rs | 24 +++ .../run-make/bootstrap-from-c-with-native/main.c | 16 ++ .../run-make/bootstrap-from-c-with-uvio/Makefile | 9 - .../run-make/bootstrap-from-c-with-uvio/lib.rs | 25 --- .../run-make/bootstrap-from-c-with-uvio/main.c | 16 -- src/test/run-pass/use.rs | 2 +- 44 files changed, 320 insertions(+), 237 deletions(-) create mode 
100644 src/test/run-make/bootstrap-from-c-with-green/Makefile create mode 100644 src/test/run-make/bootstrap-from-c-with-green/lib.rs create mode 100644 src/test/run-make/bootstrap-from-c-with-green/main.c create mode 100644 src/test/run-make/bootstrap-from-c-with-native/Makefile create mode 100644 src/test/run-make/bootstrap-from-c-with-native/lib.rs create mode 100644 src/test/run-make/bootstrap-from-c-with-native/main.c delete mode 100644 src/test/run-make/bootstrap-from-c-with-uvio/Makefile delete mode 100644 src/test/run-make/bootstrap-from-c-with-uvio/lib.rs delete mode 100644 src/test/run-make/bootstrap-from-c-with-uvio/main.c (limited to 'src/libstd/rt') diff --git a/src/compiletest/compiletest.rs b/src/compiletest/compiletest.rs index 89b6f06abfc..ae7d1a30a84 100644 --- a/src/compiletest/compiletest.rs +++ b/src/compiletest/compiletest.rs @@ -13,6 +13,7 @@ #[allow(non_camel_case_types)]; #[deny(warnings)]; +#[cfg(stage0)] extern mod green; extern mod extra; use std::os; diff --git a/src/libextra/comm.rs b/src/libextra/comm.rs index c3b17fe9964..52b5bedb7ea 100644 --- a/src/libextra/comm.rs +++ b/src/libextra/comm.rs @@ -96,7 +96,6 @@ pub fn rendezvous() -> (SyncPort, SyncChan) { #[cfg(test)] mod test { use comm::{DuplexStream, rendezvous}; - use std::rt::test::run_in_uv_task; #[test] @@ -124,13 +123,11 @@ mod test { #[test] fn recv_a_lot() { // Rendezvous streams should be able to handle any number of messages being sent - do run_in_uv_task { - let (port, chan) = rendezvous(); - do spawn { - 1000000.times(|| { chan.send(()) }) - } - 1000000.times(|| { port.recv() }) + let (port, chan) = rendezvous(); + do spawn { + 1000000.times(|| { chan.send(()) }) } + 1000000.times(|| { port.recv() }) } #[test] diff --git a/src/libextra/sync.rs b/src/libextra/sync.rs index 2a53775a907..f43329076c8 100644 --- a/src/libextra/sync.rs +++ b/src/libextra/sync.rs @@ -761,23 +761,21 @@ mod tests { fn test_sem_runtime_friendly_blocking() { // Force the runtime to schedule two 
threads on the same sched_loop. // When one blocks, it should schedule the other one. - do task::spawn_sched(task::SingleThreaded) { - let s = Semaphore::new(1); - let s2 = s.clone(); - let (p, c) = Chan::new(); - let mut child_data = Some((s2, c)); - s.access(|| { - let (s2, c) = child_data.take_unwrap(); - do task::spawn { - c.send(()); - s2.access(|| { }); - c.send(()); - } - let _ = p.recv(); // wait for child to come alive - 5.times(|| { task::deschedule(); }); // let the child contend - }); - let _ = p.recv(); // wait for child to be done - } + let s = Semaphore::new(1); + let s2 = s.clone(); + let (p, c) = Chan::new(); + let mut child_data = Some((s2, c)); + s.access(|| { + let (s2, c) = child_data.take_unwrap(); + do task::spawn { + c.send(()); + s2.access(|| { }); + c.send(()); + } + let _ = p.recv(); // wait for child to come alive + 5.times(|| { task::deschedule(); }); // let the child contend + }); + let _ = p.recv(); // wait for child to be done } /************************************************************************ * Mutex tests diff --git a/src/libextra/task_pool.rs b/src/libextra/task_pool.rs index 649a9a06644..ba38f876287 100644 --- a/src/libextra/task_pool.rs +++ b/src/libextra/task_pool.rs @@ -17,8 +17,6 @@ use std::task; use std::vec; -#[cfg(test)] use std::task::SingleThreaded; - enum Msg { Execute(proc(&T)), Quit diff --git a/src/libgreen/basic.rs b/src/libgreen/basic.rs index e1e489a2a2b..0574792c18d 100644 --- a/src/libgreen/basic.rs +++ b/src/libgreen/basic.rs @@ -16,7 +16,7 @@ //! loop if no other one is provided (and M:N scheduling is desired). 
use std::cast; -use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausibleIdleCallback, +use std::rt::rtio::{EventLoop, IoFactory, RemoteCallback, PausableIdleCallback, Callback}; use std::unstable::sync::Exclusive; use std::util; diff --git a/src/libgreen/lib.rs b/src/libgreen/lib.rs index 7318eaaf679..3a2e8a2b36c 100644 --- a/src/libgreen/lib.rs +++ b/src/libgreen/lib.rs @@ -18,12 +18,7 @@ //! functionality inside of 1:1 programs. #[pkgid = "green#0.9-pre"]; -#[link(name = "green", - package_id = "green", - vers = "0.9-pre", - uuid = "20c38f8c-bfea-83ed-a068-9dc05277be26", - url = "https://github.com/mozilla/rust/tree/master/src/libgreen")]; - +#[crate_id = "green#0.9-pre"]; #[license = "MIT/ASL2"]; #[crate_type = "rlib"]; #[crate_type = "dylib"]; @@ -61,16 +56,13 @@ pub mod stack; pub mod task; #[lang = "start"] +#[cfg(not(test))] pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { use std::cast; - let mut ret = None; - simple::task().run(|| { - ret = Some(do start(argc, argv) { - let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; - main(); - }) - }); - ret.unwrap() + do start(argc, argv) { + let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; + main(); + } } /// Set up a default runtime configuration, given compiler-supplied arguments. @@ -93,10 +85,14 @@ pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { /// error. pub fn start(argc: int, argv: **u8, main: proc()) -> int { rt::init(argc, argv); - let exit_code = run(main); + let mut main = Some(main); + let mut ret = None; + simple::task().run(|| { + ret = Some(run(main.take_unwrap())); + }); // unsafe is ok b/c we're sure that the runtime is gone unsafe { rt::cleanup() } - exit_code + ret.unwrap() } /// Execute the main function in a pool of M:N schedulers. @@ -114,6 +110,7 @@ pub fn run(main: proc()) -> int { let (port, chan) = Chan::new(); let mut opts = TaskOpts::new(); opts.notify_chan = Some(chan); + opts.name = Some(SendStrStatic("
")); pool.spawn(opts, main); // Wait for the main task to return, and set the process error code diff --git a/src/libgreen/macros.rs b/src/libgreen/macros.rs index ad0854e2b1e..56dc3204da8 100644 --- a/src/libgreen/macros.rs +++ b/src/libgreen/macros.rs @@ -54,14 +54,13 @@ macro_rules! rtabort ( pub fn dumb_println(args: &fmt::Arguments) { use std::io; use std::libc; - use std::vec; struct Stderr; impl io::Writer for Stderr { fn write(&mut self, data: &[u8]) { unsafe { libc::write(libc::STDERR_FILENO, - vec::raw::to_ptr(data) as *libc::c_void, + data.as_ptr() as *libc::c_void, data.len() as libc::size_t); } } diff --git a/src/libgreen/sched.rs b/src/libgreen/sched.rs index 95c4d8347d5..ef62f654ddf 100644 --- a/src/libgreen/sched.rs +++ b/src/libgreen/sched.rs @@ -11,7 +11,7 @@ use std::cast; use std::rand::{XorShiftRng, Rng, Rand}; use std::rt::local::Local; -use std::rt::rtio::{RemoteCallback, PausibleIdleCallback, Callback, EventLoop}; +use std::rt::rtio::{RemoteCallback, PausableIdleCallback, Callback, EventLoop}; use std::rt::task::BlockedTask; use std::rt::task::Task; use std::sync::deque; @@ -779,6 +779,9 @@ impl Scheduler { /// randomness is a result of performing a round of work stealing (which /// may end up stealing from the current scheduler). pub fn yield_now(mut ~self, cur: ~GreenTask) { + // Async handles trigger the scheduler by calling yield_now on the local + // task, which eventually gets us to here. See comments in SchedRunner + // for more info on this. if cur.is_sched() { assert!(self.sched_task.is_none()); self.run_sched_once(cur); @@ -1345,7 +1348,7 @@ mod test { impl Drop for S { fn drop(&mut self) { - let _foo = @0; + let _foo = ~0; } } diff --git a/src/libnative/io/process.rs b/src/libnative/io/process.rs index 2277d408ee4..64ce9d7e348 100644 --- a/src/libnative/io/process.rs +++ b/src/libnative/io/process.rs @@ -8,7 +8,6 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cast; use std::io; use std::libc::{pid_t, c_void, c_int}; use std::libc; @@ -17,6 +16,8 @@ use std::ptr; use std::rt::rtio; use p = std::io::process; +#[cfg(windows)] use std::cast; + use super::file; /** diff --git a/src/libnative/lib.rs b/src/libnative/lib.rs index 60ae239ee97..e0666592651 100644 --- a/src/libnative/lib.rs +++ b/src/libnative/lib.rs @@ -15,12 +15,7 @@ //! version of I/O. #[pkgid = "native#0.9-pre"]; -#[link(name = "native", - package_id = "native", - vers = "0.9-pre", - uuid = "535344a7-890f-5a23-e1f3-e0d118805141", - url = "https://github.com/mozilla/rust/tree/master/src/native")]; - +#[crate_id = "native#0.9-pre"]; #[license = "MIT/ASL2"]; #[crate_type = "rlib"]; #[crate_type = "dylib"]; @@ -46,7 +41,7 @@ pub mod task; #[lang = "start"] pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { use std::cast; - use std::task::try; + use std::task; do start(argc, argv) { // Instead of invoking main directly on this thread, invoke it on @@ -55,7 +50,9 @@ pub fn lang_start(main: *u8, argc: int, argv: **u8) -> int { // of the main thread's stack, so for stack overflow detection to work // we must spawn the task in a subtask which we know the stack size of. let main: extern "Rust" fn() = unsafe { cast::transmute(main) }; - match do try { main() } { + let mut task = task::task(); + task.name("
"); + match do task.try { main() } { Ok(()) => { os::set_exit_status(0); } Err(..) => { os::set_exit_status(rt::DEFAULT_ERROR_CODE); } } diff --git a/src/librustc/back/link.rs b/src/librustc/back/link.rs index 0cf91fbba0e..214f60291fe 100644 --- a/src/librustc/back/link.rs +++ b/src/librustc/back/link.rs @@ -333,6 +333,10 @@ pub mod write { } unsafe fn configure_llvm(sess: Session) { + use std::unstable::mutex::{MUTEX_INIT, Mutex}; + static mut LOCK: Mutex = MUTEX_INIT; + static mut CONFIGURED: bool = false; + // Copy what clan does by turning on loop vectorization at O2 and // slp vectorization at O3 let vectorize_loop = !sess.no_vectorize_loops() && @@ -360,7 +364,13 @@ pub mod write { add(*arg); } - llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr()); + LOCK.lock(); + if !CONFIGURED { + llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, + llvm_args.as_ptr()); + CONFIGURED = true; + } + LOCK.unlock(); } unsafe fn populate_llvm_passes(fpm: lib::llvm::PassManagerRef, diff --git a/src/librustpkg/tests.rs b/src/librustpkg/tests.rs index ecf08df5f18..21f18eda140 100644 --- a/src/librustpkg/tests.rs +++ b/src/librustpkg/tests.rs @@ -487,8 +487,9 @@ fn lib_output_file_name(workspace: &Path, short_name: &str) -> Path { } fn output_file_name(workspace: &Path, short_name: ~str) -> Path { - target_build_dir(workspace).join(short_name.as_slice()).join(format!("{}{}", short_name, - os::EXE_SUFFIX)) + target_build_dir(workspace).join(short_name.as_slice()) + .join(format!("{}{}", short_name, + os::consts::EXE_SUFFIX)) } #[cfg(target_os = "linux")] @@ -1353,7 +1354,7 @@ fn test_import_rustpkg() { command_line_test([~"build", ~"foo"], workspace); debug!("workspace = {}", workspace.display()); assert!(target_build_dir(workspace).join("foo").join(format!("pkg{}", - os::EXE_SUFFIX)).exists()); + os::consts::EXE_SUFFIX)).exists()); } #[test] @@ -1366,7 +1367,7 @@ fn test_macro_pkg_script() { command_line_test([~"build", ~"foo"], workspace); 
debug!("workspace = {}", workspace.display()); assert!(target_build_dir(workspace).join("foo").join(format!("pkg{}", - os::EXE_SUFFIX)).exists()); + os::consts::EXE_SUFFIX)).exists()); } #[test] diff --git a/src/librustuv/file.rs b/src/librustuv/file.rs index 059bf072a1a..82d0fd823a3 100644 --- a/src/librustuv/file.rs +++ b/src/librustuv/file.rs @@ -18,7 +18,6 @@ use std::rt::task::BlockedTask; use std::io::{FileStat, IoError}; use std::io; use std::rt::rtio; -use std::vec; use homing::{HomingIO, HomeHandle}; use super::{Loop, UvError, uv_error_to_io_error, wait_until_woken_after, wakeup}; diff --git a/src/librustuv/idle.rs b/src/librustuv/idle.rs index 44b74d05096..80d21404e4b 100644 --- a/src/librustuv/idle.rs +++ b/src/librustuv/idle.rs @@ -100,7 +100,7 @@ mod test { use std::cast; use std::cell::RefCell; use std::rc::Rc; - use std::rt::rtio::{Callback, PausibleIdleCallback}; + use std::rt::rtio::{Callback, PausableIdleCallback}; use std::rt::task::{BlockedTask, Task}; use std::rt::local::Local; use super::IdleWatcher; diff --git a/src/librustuv/macros.rs b/src/librustuv/macros.rs index 61b4de57655..6c8c16784a1 100644 --- a/src/librustuv/macros.rs +++ b/src/librustuv/macros.rs @@ -30,14 +30,13 @@ macro_rules! 
uvdebug ( pub fn dumb_println(args: &fmt::Arguments) { use std::io; use std::libc; - use std::vec; struct Stderr; impl io::Writer for Stderr { fn write(&mut self, data: &[u8]) { unsafe { libc::write(libc::STDERR_FILENO, - vec::raw::to_ptr(data) as *libc::c_void, + data.as_ptr() as *libc::c_void, data.len() as libc::size_t); } } diff --git a/src/librustuv/signal.rs b/src/librustuv/signal.rs index 0f81966b169..6772c6d1936 100644 --- a/src/librustuv/signal.rs +++ b/src/librustuv/signal.rs @@ -68,7 +68,7 @@ impl RtioSignal for SignalWatcher {} impl Drop for SignalWatcher { fn drop(&mut self) { let _m = self.fire_homing_missile(); - self.close_async_(); + self.close(); } } diff --git a/src/librustuv/timer.rs b/src/librustuv/timer.rs index e87090753f5..4a0ad44d311 100644 --- a/src/librustuv/timer.rs +++ b/src/librustuv/timer.rs @@ -169,7 +169,7 @@ impl Drop for TimerWatcher { let _action = { let _m = self.fire_homing_missile(); self.stop(); - self.close_async_(); + self.close(); self.action.take() }; } diff --git a/src/librustuv/uvio.rs b/src/librustuv/uvio.rs index 9e7343aa2da..dbf129d0b69 100644 --- a/src/librustuv/uvio.rs +++ b/src/librustuv/uvio.rs @@ -86,10 +86,10 @@ impl rtio::EventLoop for UvEventLoop { IdleWatcher::onetime(&mut self.uvio.loop_, f); } - fn pausible_idle_callback(&mut self, cb: ~rtio::Callback) - -> ~rtio::PausibleIdleCallback + fn pausable_idle_callback(&mut self, cb: ~rtio::Callback) + -> ~rtio::PausableIdleCallback { - IdleWatcher::new(&mut self.uvio.loop_, cb) as ~rtio::PausibleIdleCallback + IdleWatcher::new(&mut self.uvio.loop_, cb) as ~rtio::PausableIdleCallback } fn remote_callback(&mut self, f: ~rtio::Callback) -> ~rtio::RemoteCallback { diff --git a/src/libstd/io/net/unix.rs b/src/libstd/io/net/unix.rs index 59a6903adbf..01b409d4316 100644 --- a/src/libstd/io/net/unix.rs +++ b/src/libstd/io/net/unix.rs @@ -175,7 +175,8 @@ mod tests { fn connect_error() { let mut called = false; io_error::cond.trap(|e| { - assert_eq!(e.kind, OtherIoError); 
+ assert_eq!(e.kind, + if cfg!(windows) {OtherIoError} else {FileNotFound}); called = true; }).inside(|| { let stream = UnixStream::connect(&("path/to/nowhere")); diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 5249d331f72..1e4fa7968dc 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -34,7 +34,6 @@ use libc; use option::{Option, Some, None}; use result::{Ok, Err}; use rt::rtio::{DontClose, IoFactory, LocalIo, RtioFileStream, RtioTTY}; -use vec; // And so begins the tale of acquiring a uv handle to a stdio stream on all // platforms in all situations. Our story begins by splitting the world into two @@ -137,7 +136,7 @@ fn with_task_stdout(f: |&mut Writer|) { fn write(&mut self, data: &[u8]) { unsafe { libc::write(libc::STDOUT_FILENO, - vec::raw::to_ptr(data) as *libc::c_void, + data.as_ptr() as *libc::c_void, data.len() as libc::size_t); } } diff --git a/src/libstd/io/test.rs b/src/libstd/io/test.rs index e273aedf7cc..4be11227965 100644 --- a/src/libstd/io/test.rs +++ b/src/libstd/io/test.rs @@ -31,6 +31,7 @@ macro_rules! 
iotest ( use io::net::tcp::*; use io::net::ip::*; use io::net::udp::*; + #[cfg(unix)] use io::net::unix::*; use str; use util; diff --git a/src/libstd/rt/local_ptr.rs b/src/libstd/rt/local_ptr.rs index b75f2927003..42cce272e44 100644 --- a/src/libstd/rt/local_ptr.rs +++ b/src/libstd/rt/local_ptr.rs @@ -42,7 +42,7 @@ impl Drop for Borrowed { } let val: ~T = cast::transmute(self.val); put::(val); - assert!(exists()); + rtassert!(exists()); } } } @@ -110,7 +110,7 @@ pub mod compiled { #[inline] pub unsafe fn take() -> ~T { let ptr = RT_TLS_PTR; - assert!(!ptr.is_null()); + rtassert!(!ptr.is_null()); let ptr: ~T = cast::transmute(ptr); // can't use `as`, due to type not matching with `cfg(test)` RT_TLS_PTR = cast::transmute(0); @@ -180,7 +180,7 @@ pub mod native { } pub unsafe fn cleanup() { - assert!(INITIALIZED); + rtassert!(INITIALIZED); tls::destroy(RT_TLS_KEY); LOCK.destroy(); INITIALIZED = false; diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs index d0c062c1274..0dd6c883d5b 100644 --- a/src/libstd/rt/mod.rs +++ b/src/libstd/rt/mod.rs @@ -176,6 +176,7 @@ pub fn init(argc: int, argv: **u8) { args::init(argc, argv); env::init(); logging::init(); + local_ptr::init(); } } diff --git a/src/libstd/rt/rtio.rs b/src/libstd/rt/rtio.rs index 97b08cc18ca..6b3d50a76ac 100644 --- a/src/libstd/rt/rtio.rs +++ b/src/libstd/rt/rtio.rs @@ -95,14 +95,16 @@ impl<'a> LocalIo<'a> { /// Returns the local I/O: either the local scheduler's I/O services or /// the native I/O services. pub fn borrow() -> Option { - // XXX: This is currently very unsafely implemented. We don't actually - // *take* the local I/O so there's a very real possibility that we - // can have two borrows at once. 
Currently there is not a clear way - // to actually borrow the local I/O factory safely because even if - // ownership were transferred down to the functions that the I/O - // factory implements it's just too much of a pain to know when to - // relinquish ownership back into the local task (but that would be - // the safe way of implementing this function). + // FIXME(#11053): bad + // + // This is currently very unsafely implemented. We don't actually + // *take* the local I/O so there's a very real possibility that we + // can have two borrows at once. Currently there is not a clear way + // to actually borrow the local I/O factory safely because even if + // ownership were transferred down to the functions that the I/O + // factory implements it's just too much of a pain to know when to + // relinquish ownership back into the local task (but that would be + // the safe way of implementing this function). // // In order to get around this, we just transmute a copy out of the task // in order to have what is likely a static lifetime (bad). 
diff --git a/src/libstd/rt/task.rs b/src/libstd/rt/task.rs index 765f0b427cd..e6ab159a769 100644 --- a/src/libstd/rt/task.rs +++ b/src/libstd/rt/task.rs @@ -15,9 +15,10 @@ use any::AnyOwnExt; use borrow; +use cast; use cleanup; use io::Writer; -use libc::{c_char, size_t}; +use iter::{Iterator, Take}; use local_data; use ops::Drop; use option::{Option, Some, None}; @@ -488,7 +489,10 @@ mod test { #[test] #[should_fail] - fn test_begin_unwind() { begin_unwind("cause", file!(), line!()) } + fn test_begin_unwind() { + use rt::unwind::begin_unwind; + begin_unwind("cause", file!(), line!()) + } // Task blocking tests diff --git a/src/libstd/rt/thread.rs b/src/libstd/rt/thread.rs index 11189282f68..f4f4aaa2765 100644 --- a/src/libstd/rt/thread.rs +++ b/src/libstd/rt/thread.rs @@ -144,15 +144,11 @@ impl Drop for Thread { #[cfg(windows)] mod imp { - use super::DEFAULT_STACK_SIZE; - use cast; use libc; use libc::types::os::arch::extra::{LPSECURITY_ATTRIBUTES, SIZE_T, BOOL, LPVOID, DWORD, LPDWORD, HANDLE}; use ptr; - use libc; - use cast; pub type rust_thread = HANDLE; pub type rust_thread_return = DWORD; diff --git a/src/libstd/rt/unwind.rs b/src/libstd/rt/unwind.rs index 8248c6274ca..9706dbae4c6 100644 --- a/src/libstd/rt/unwind.rs +++ b/src/libstd/rt/unwind.rs @@ -10,8 +10,9 @@ // Implementation of Rust stack unwinding // -// For background on exception handling and stack unwinding please see "Exception Handling in LLVM" -// (llvm.org/docs/ExceptionHandling.html) and documents linked from it. +// For background on exception handling and stack unwinding please see +// "Exception Handling in LLVM" (llvm.org/docs/ExceptionHandling.html) and +// documents linked from it. // These are also good reads: // http://theofilos.cs.columbia.edu/blog/2013/09/22/base_abi/ // http://monoinfinito.wordpress.com/series/exception-handling-in-c/ @@ -20,41 +21,55 @@ // ~~~ A brief summary ~~~ // Exception handling happens in two phases: a search phase and a cleanup phase. 
// -// In both phases the unwinder walks stack frames from top to bottom using information from -// the stack frame unwind sections of the current process's modules ("module" here refers to -// an OS module, i.e. an executable or a dynamic library). +// In both phases the unwinder walks stack frames from top to bottom using +// information from the stack frame unwind sections of the current process's +// modules ("module" here refers to an OS module, i.e. an executable or a +// dynamic library). // -// For each stack frame, it invokes the associated "personality routine", whose address is also -// stored in the unwind info section. +// For each stack frame, it invokes the associated "personality routine", whose +// address is also stored in the unwind info section. // -// In the search phase, the job of a personality routine is to examine exception object being -// thrown, and to decide whether it should be caught at that stack frame. Once the handler frame -// has been identified, cleanup phase begins. +// In the search phase, the job of a personality routine is to examine exception +// object being thrown, and to decide whether it should be caught at that stack +// frame. Once the handler frame has been identified, cleanup phase begins. // -// In the cleanup phase, personality routines invoke cleanup code associated with their -// stack frames (i.e. destructors). Once stack has been unwound down to the handler frame level, -// unwinding stops and the last personality routine transfers control to its' catch block. +// In the cleanup phase, personality routines invoke cleanup code associated +// with their stack frames (i.e. destructors). Once stack has been unwound down +// to the handler frame level, unwinding stops and the last personality routine +// transfers control to its' catch block. 
// // ~~~ Frame unwind info registration ~~~ -// Each module has its' own frame unwind info section (usually ".eh_frame"), and unwinder needs -// to know about all of them in order for unwinding to be able to cross module boundaries. +// Each module has its' own frame unwind info section (usually ".eh_frame"), and +// unwinder needs to know about all of them in order for unwinding to be able to +// cross module boundaries. // -// On some platforms, like Linux, this is achieved by dynamically enumerating currently loaded -// modules via the dl_iterate_phdr() API and finding all .eh_frame sections. +// On some platforms, like Linux, this is achieved by dynamically enumerating +// currently loaded modules via the dl_iterate_phdr() API and finding all +// .eh_frame sections. // -// Others, like Windows, require modules to actively register their unwind info sections by calling -// __register_frame_info() API at startup. -// In the latter case it is essential that there is only one copy of the unwinder runtime -// in the process. This is usually achieved by linking to the dynamic version of the unwind -// runtime. +// Others, like Windows, require modules to actively register their unwind info +// sections by calling __register_frame_info() API at startup. In the latter +// case it is essential that there is only one copy of the unwinder runtime in +// the process. This is usually achieved by linking to the dynamic version of +// the unwind runtime. // // Currently Rust uses unwind runtime provided by libgcc. -use prelude::*; -use cast::transmute; -use task::TaskResult; +use any::{Any, AnyRefExt}; +use c_str::CString; +use cast; +use kinds::Send; +use libc::{c_char, size_t}; use libc::{c_void, c_int}; -use self::libunwind::*; +use option::{Some, None, Option}; +use result::{Err, Ok}; +use rt::local::Local; +use rt::task::Task; +use str::Str; +use task::TaskResult; +use unstable::intrinsics; + +use uw = self::libunwind; mod libunwind { //! 
Unwind library interface @@ -109,34 +124,41 @@ mod libunwind { } pub struct Unwinder { - unwinding: bool, - cause: Option<~Any> + priv unwinding: bool, + priv cause: Option<~Any> } impl Unwinder { + pub fn new() -> Unwinder { + Unwinder { + unwinding: false, + cause: None, + } + } + + pub fn unwinding(&self) -> bool { + self.unwinding + } pub fn try(&mut self, f: ||) { use unstable::raw::Closure; unsafe { - let closure: Closure = transmute(f); - let code = transmute(closure.code); - let env = transmute(closure.env); - - let ep = rust_try(try_fn, code, env); + let closure: Closure = cast::transmute(f); + let ep = rust_try(try_fn, closure.code as *c_void, + closure.env as *c_void); if !ep.is_null() { rtdebug!("Caught {}", (*ep).exception_class); - _Unwind_DeleteException(ep); + uw::_Unwind_DeleteException(ep); } } extern fn try_fn(code: *c_void, env: *c_void) { unsafe { - let closure: Closure = Closure { - code: transmute(code), - env: transmute(env), - }; - let closure: || = transmute(closure); + let closure: || = cast::transmute(Closure { + code: code as *(), + env: env as *(), + }); closure(); } } @@ -144,10 +166,11 @@ impl Unwinder { extern { // Rust's try-catch // When f(...) returns normally, the return value is null. - // When f(...) throws, the return value is a pointer to the caught exception object. + // When f(...) throws, the return value is a pointer to the caught + // exception object. 
fn rust_try(f: extern "C" fn(*c_void, *c_void), code: *c_void, - data: *c_void) -> *_Unwind_Exception; + data: *c_void) -> *uw::_Unwind_Exception; } } @@ -158,21 +181,21 @@ impl Unwinder { self.cause = Some(cause); unsafe { - let exception = ~_Unwind_Exception { + let exception = ~uw::_Unwind_Exception { exception_class: rust_exception_class(), exception_cleanup: exception_cleanup, private_1: 0, private_2: 0 }; - let error = _Unwind_RaiseException(transmute(exception)); + let error = uw::_Unwind_RaiseException(cast::transmute(exception)); rtabort!("Could not unwind stack, error = {}", error as int) } - extern "C" fn exception_cleanup(_unwind_code: _Unwind_Reason_Code, - exception: *_Unwind_Exception) { + extern "C" fn exception_cleanup(_unwind_code: uw::_Unwind_Reason_Code, + exception: *uw::_Unwind_Exception) { rtdebug!("exception_cleanup()"); unsafe { - let _: ~_Unwind_Exception = transmute(exception); + let _: ~uw::_Unwind_Exception = cast::transmute(exception); } } } @@ -188,68 +211,75 @@ impl Unwinder { // Rust's exception class identifier. This is used by personality routines to // determine whether the exception was thrown by their own runtime. -fn rust_exception_class() -> _Unwind_Exception_Class { - let bytes = bytes!("MOZ\0RUST"); // vendor, language - unsafe { - let ptr: *_Unwind_Exception_Class = transmute(bytes.as_ptr()); - *ptr - } +fn rust_exception_class() -> uw::_Unwind_Exception_Class { + // M O Z \0 R U S T -- vendor, language + 0x4d4f5a_00_52555354 } - -// We could implement our personality routine in pure Rust, however exception info decoding -// is tedious. More importantly, personality routines have to handle various platform -// quirks, which are not fun to maintain. For this reason, we attempt to reuse personality -// routine of the C language: __gcc_personality_v0. +// We could implement our personality routine in pure Rust, however exception +// info decoding is tedious. 
More importantly, personality routines have to +// handle various platform quirks, which are not fun to maintain. For this +// reason, we attempt to reuse personality routine of the C language: +// __gcc_personality_v0. // -// Since C does not support exception catching, __gcc_personality_v0 simply always -// returns _URC_CONTINUE_UNWIND in search phase, and always returns _URC_INSTALL_CONTEXT -// (i.e. "invoke cleanup code") in cleanup phase. +// Since C does not support exception catching, __gcc_personality_v0 simply +// always returns _URC_CONTINUE_UNWIND in search phase, and always returns +// _URC_INSTALL_CONTEXT (i.e. "invoke cleanup code") in cleanup phase. // -// This is pretty close to Rust's exception handling approach, except that Rust does have -// a single "catch-all" handler at the bottom of each task's stack. +// This is pretty close to Rust's exception handling approach, except that Rust +// does have a single "catch-all" handler at the bottom of each task's stack. // So we have two versions: -// - rust_eh_personality, used by all cleanup landing pads, which never catches, so -// the behavior of __gcc_personality_v0 is perfectly adequate there, and -// - rust_eh_personality_catch, used only by rust_try(), which always catches. This is -// achieved by overriding the return value in search phase to always say "catch!". +// - rust_eh_personality, used by all cleanup landing pads, which never catches, +// so the behavior of __gcc_personality_v0 is perfectly adequate there, and +// - rust_eh_personality_catch, used only by rust_try(), which always catches. +// This is achieved by overriding the return value in search phase to always +// say "catch!". 
extern "C" { fn __gcc_personality_v0(version: c_int, - actions: _Unwind_Action, - exception_class: _Unwind_Exception_Class, - ue_header: *_Unwind_Exception, - context: *_Unwind_Context) -> _Unwind_Reason_Code; + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + ue_header: *uw::_Unwind_Exception, + context: *uw::_Unwind_Context) + -> uw::_Unwind_Reason_Code; } #[lang="eh_personality"] #[no_mangle] // so we can reference it by name from middle/trans/base.rs #[doc(hidden)] #[cfg(not(test))] -pub extern "C" fn rust_eh_personality(version: c_int, - actions: _Unwind_Action, - exception_class: _Unwind_Exception_Class, - ue_header: *_Unwind_Exception, - context: *_Unwind_Context) -> _Unwind_Reason_Code { +pub extern "C" fn rust_eh_personality( + version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + ue_header: *uw::_Unwind_Exception, + context: *uw::_Unwind_Context +) -> uw::_Unwind_Reason_Code +{ unsafe { - __gcc_personality_v0(version, actions, exception_class, ue_header, context) + __gcc_personality_v0(version, actions, exception_class, ue_header, + context) } } #[no_mangle] // referenced from rust_try.ll #[doc(hidden)] #[cfg(not(test))] -pub extern "C" fn rust_eh_personality_catch(version: c_int, - actions: _Unwind_Action, - exception_class: _Unwind_Exception_Class, - ue_header: *_Unwind_Exception, - context: *_Unwind_Context) -> _Unwind_Reason_Code { - if (actions as c_int & _UA_SEARCH_PHASE as c_int) != 0 { // search phase - _URC_HANDLER_FOUND // catch! +pub extern "C" fn rust_eh_personality_catch( + version: c_int, + actions: uw::_Unwind_Action, + exception_class: uw::_Unwind_Exception_Class, + ue_header: *uw::_Unwind_Exception, + context: *uw::_Unwind_Context +) -> uw::_Unwind_Reason_Code +{ + if (actions as c_int & uw::_UA_SEARCH_PHASE as c_int) != 0 { // search phase + uw::_URC_HANDLER_FOUND // catch! 
} else { // cleanup phase unsafe { - __gcc_personality_v0(version, actions, exception_class, ue_header, context) + __gcc_personality_v0(version, actions, exception_class, ue_header, + context) } } } @@ -307,11 +337,11 @@ pub fn begin_unwind(msg: M, file: &'static str, line: uint) -> ! let n = (*task).name.as_ref() .map(|n| n.as_slice()).unwrap_or(""); - println!("task '{}' failed at '{}', {}:{}", n, msg_s, + rterrln!("task '{}' failed at '{}', {}:{}", n, msg_s, file, line); } None => { - println!("failed at '{}', {}:{}", msg_s, file, line); + rterrln!("failed at '{}', {}:{}", msg_s, file, line); intrinsics::abort(); } } diff --git a/src/libstd/rt/util.rs b/src/libstd/rt/util.rs index 69c1da39abc..730a38ce886 100644 --- a/src/libstd/rt/util.rs +++ b/src/libstd/rt/util.rs @@ -69,14 +69,13 @@ pub fn default_sched_threads() -> uint { pub fn dumb_println(args: &fmt::Arguments) { use io; use libc; - use vec; struct Stderr; impl io::Writer for Stderr { fn write(&mut self, data: &[u8]) { unsafe { libc::write(libc::STDERR_FILENO, - vec::raw::to_ptr(data) as *libc::c_void, + data.as_ptr() as *libc::c_void, data.len() as libc::size_t); } } diff --git a/src/libstd/sync/arc.rs b/src/libstd/sync/arc.rs index b405104c09a..7b94a3acc2b 100644 --- a/src/libstd/sync/arc.rs +++ b/src/libstd/sync/arc.rs @@ -32,7 +32,7 @@ use vec; /// An atomically reference counted pointer. /// /// Enforces no shared-memory safety. 
-//#[unsafe_no_drop_flag] FIXME: #9758 +#[unsafe_no_drop_flag] pub struct UnsafeArc { priv data: *mut ArcData, } diff --git a/src/libstd/unstable/stack.rs b/src/libstd/unstable/stack.rs index b8788b8c55c..d6cd690eaa9 100644 --- a/src/libstd/unstable/stack.rs +++ b/src/libstd/unstable/stack.rs @@ -192,6 +192,7 @@ pub unsafe fn record_sp_limit(limit: uint) { #[cfg(target_arch = "mips")] #[cfg(target_arch = "arm")] #[inline(always)] unsafe fn target_record_sp_limit(limit: uint) { + use libc::c_void; return record_sp_limit(limit as *c_void); extern { fn record_sp_limit(limit: *c_void); @@ -265,6 +266,7 @@ pub unsafe fn get_sp_limit() -> uint { #[cfg(target_arch = "mips")] #[cfg(target_arch = "arm")] #[inline(always)] unsafe fn target_get_sp_limit() -> uint { + use libc::c_void; return get_sp_limit() as uint; extern { fn get_sp_limit() -> *c_void; diff --git a/src/test/bench/rt-messaging-ping-pong.rs b/src/test/bench/rt-messaging-ping-pong.rs index 90d81aa7c3e..6eef71622c5 100644 --- a/src/test/bench/rt-messaging-ping-pong.rs +++ b/src/test/bench/rt-messaging-ping-pong.rs @@ -1,4 +1,3 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -12,7 +11,6 @@ extern mod extra; use std::os; use std::uint; -use std::rt::test::spawntask_later; // This is a simple bench that creates M pairs of of tasks. These // tasks ping-pong back and forth over a pair of streams. 
This is a @@ -28,7 +26,7 @@ fn ping_pong_bench(n: uint, m: uint) { // Create a stream B->A let (pb,cb) = Chan::<()>::new(); - do spawntask_later() || { + do spawn() || { let chan = ca; let port = pb; n.times(|| { @@ -37,7 +35,7 @@ fn ping_pong_bench(n: uint, m: uint) { }) } - do spawntask_later() || { + do spawn() || { let chan = cb; let port = pa; n.times(|| { diff --git a/src/test/bench/rt-parfib.rs b/src/test/bench/rt-parfib.rs index ab607d9aebc..6e3c42f2a4d 100644 --- a/src/test/bench/rt-parfib.rs +++ b/src/test/bench/rt-parfib.rs @@ -12,7 +12,6 @@ extern mod extra; use std::os; use std::uint; -use std::rt::test::spawntask_later; // A simple implementation of parfib. One subtree is found in a new // task and communicated over a oneshot pipe, the other is found @@ -24,7 +23,7 @@ fn parfib(n: uint) -> uint { } let (port,chan) = Chan::new(); - do spawntask_later { + do spawn { chan.send(parfib(n-1)); }; let m2 = parfib(n-2); diff --git a/src/test/bench/shootout-spectralnorm.rs b/src/test/bench/shootout-spectralnorm.rs index 87cd01f9aad..8174347e386 100644 --- a/src/test/bench/shootout-spectralnorm.rs +++ b/src/test/bench/shootout-spectralnorm.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+// xfail-test arcs no longer unwrap + extern mod extra; use std::from_str::FromStr; diff --git a/src/test/compile-fail/std-uncopyable-atomics.rs b/src/test/compile-fail/std-uncopyable-atomics.rs index a46dec7830a..57c66974fcd 100644 --- a/src/test/compile-fail/std-uncopyable-atomics.rs +++ b/src/test/compile-fail/std-uncopyable-atomics.rs @@ -12,7 +12,7 @@ #[feature(globs)]; -use std::unstable::atomics::*; +use std::sync::atomics::*; use std::ptr; fn main() { diff --git a/src/test/run-make/bootstrap-from-c-with-green/Makefile b/src/test/run-make/bootstrap-from-c-with-green/Makefile new file mode 100644 index 00000000000..7f466573da7 --- /dev/null +++ b/src/test/run-make/bootstrap-from-c-with-green/Makefile @@ -0,0 +1,9 @@ +-include ../tools.mk + +all: + $(RUSTC) lib.rs -Z gen-crate-map + ln -nsf $(call DYLIB,boot-*) $(call DYLIB,boot) + $(CC) main.c -o $(call RUN,main) -lboot -Wl,-rpath,$(TMPDIR) + $(call RUN,main) + rm $(call DYLIB,boot) + $(call FAIL,main) diff --git a/src/test/run-make/bootstrap-from-c-with-green/lib.rs b/src/test/run-make/bootstrap-from-c-with-green/lib.rs new file mode 100644 index 00000000000..9a03c772f3a --- /dev/null +++ b/src/test/run-make/bootstrap-from-c-with-green/lib.rs @@ -0,0 +1,25 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +#[crate_id="boot#0.1"]; +#[crate_type="lib"]; +#[no_uv]; + +extern mod rustuv; +extern mod green; + +#[no_mangle] // this needs to get called from C +pub extern "C" fn foo(argc: int, argv: **u8) -> int { + do green::start(argc, argv) { + do spawn { + println!("hello"); + } + } +} diff --git a/src/test/run-make/bootstrap-from-c-with-green/main.c b/src/test/run-make/bootstrap-from-c-with-green/main.c new file mode 100644 index 00000000000..1872c1ea43b --- /dev/null +++ b/src/test/run-make/bootstrap-from-c-with-green/main.c @@ -0,0 +1,16 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// this is the rust entry point that we're going to call. +int foo(int argc, char *argv[]); + +int main(int argc, char *argv[]) { + return foo(argc, argv); +} diff --git a/src/test/run-make/bootstrap-from-c-with-native/Makefile b/src/test/run-make/bootstrap-from-c-with-native/Makefile new file mode 100644 index 00000000000..7f466573da7 --- /dev/null +++ b/src/test/run-make/bootstrap-from-c-with-native/Makefile @@ -0,0 +1,9 @@ +-include ../tools.mk + +all: + $(RUSTC) lib.rs -Z gen-crate-map + ln -nsf $(call DYLIB,boot-*) $(call DYLIB,boot) + $(CC) main.c -o $(call RUN,main) -lboot -Wl,-rpath,$(TMPDIR) + $(call RUN,main) + rm $(call DYLIB,boot) + $(call FAIL,main) diff --git a/src/test/run-make/bootstrap-from-c-with-native/lib.rs b/src/test/run-make/bootstrap-from-c-with-native/lib.rs new file mode 100644 index 00000000000..d0639d45fa5 --- /dev/null +++ b/src/test/run-make/bootstrap-from-c-with-native/lib.rs @@ -0,0 +1,24 @@ +// Copyright 2013 The Rust Project Developers. 
See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#[crate_id="boot#0.1"]; +#[crate_type="lib"]; +#[no_uv]; + +extern mod native; + +#[no_mangle] // this needs to get called from C +pub extern "C" fn foo(argc: int, argv: **u8) -> int { + do native::start(argc, argv) { + do spawn { + println!("hello"); + } + } +} diff --git a/src/test/run-make/bootstrap-from-c-with-native/main.c b/src/test/run-make/bootstrap-from-c-with-native/main.c new file mode 100644 index 00000000000..1872c1ea43b --- /dev/null +++ b/src/test/run-make/bootstrap-from-c-with-native/main.c @@ -0,0 +1,16 @@ +// Copyright 2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +// this is the rust entry point that we're going to call. 
+int foo(int argc, char *argv[]); + +int main(int argc, char *argv[]) { + return foo(argc, argv); +} diff --git a/src/test/run-make/bootstrap-from-c-with-uvio/Makefile b/src/test/run-make/bootstrap-from-c-with-uvio/Makefile deleted file mode 100644 index 7f466573da7..00000000000 --- a/src/test/run-make/bootstrap-from-c-with-uvio/Makefile +++ /dev/null @@ -1,9 +0,0 @@ --include ../tools.mk - -all: - $(RUSTC) lib.rs -Z gen-crate-map - ln -nsf $(call DYLIB,boot-*) $(call DYLIB,boot) - $(CC) main.c -o $(call RUN,main) -lboot -Wl,-rpath,$(TMPDIR) - $(call RUN,main) - rm $(call DYLIB,boot) - $(call FAIL,main) diff --git a/src/test/run-make/bootstrap-from-c-with-uvio/lib.rs b/src/test/run-make/bootstrap-from-c-with-uvio/lib.rs deleted file mode 100644 index 06a06c967f4..00000000000 --- a/src/test/run-make/bootstrap-from-c-with-uvio/lib.rs +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -#[crate_id="boot#0.1"]; -#[crate_type="lib"]; - -extern mod rustuv; // pull in uvio - -use std::rt; - -#[no_mangle] // this needs to get called from C -pub extern "C" fn foo(argc: int, argv: **u8) -> int { - do rt::start(argc, argv) { - do spawn { - println!("hello"); - } - } -} diff --git a/src/test/run-make/bootstrap-from-c-with-uvio/main.c b/src/test/run-make/bootstrap-from-c-with-uvio/main.c deleted file mode 100644 index 1872c1ea43b..00000000000 --- a/src/test/run-make/bootstrap-from-c-with-uvio/main.c +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. 
-// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// this is the rust entry point that we're going to call. -int foo(int argc, char *argv[]); - -int main(int argc, char *argv[]) { - return foo(argc, argv); -} diff --git a/src/test/run-pass/use.rs b/src/test/run-pass/use.rs index 56ce5397efb..013487e5803 100644 --- a/src/test/run-pass/use.rs +++ b/src/test/run-pass/use.rs @@ -28,4 +28,4 @@ mod baz { } #[start] -pub fn start(_: int, _: **u8) -> int { 3 } +pub fn start(_: int, _: **u8) -> int { 0 } -- cgit 1.4.1-3-g733a5