| author | Aaron Turon <aturon@mozilla.com> | 2015-03-20 00:46:13 -0700 |
|---|---|---|
| committer | Aaron Turon <aturon@mozilla.com> | 2015-03-23 11:28:54 -0700 |
| commit | 6bd3ab0d8140053475a901ad4e2e80e98955bcb0 | |
| tree | b8d6a880328d2fd590634319a047cabe66630632 /src/libstd/thread | |
| parent | b0aad7dd4fad8d7e2e2f877a511a637258949597 | |
Implement RFC 909: move thread_local into thread
This commit implements [RFC 909](https://github.com/rust-lang/rfcs/pull/909): the `std::thread_local` module is now deprecated, and its contents are available directly in `std::thread` as `LocalKey`, `LocalKeyState`, and `ScopedKey`.

The macros remain exactly as they were, which means little if any code should break. Nevertheless, this is technically a:

[breaking-change]

Closes #23547
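As a quick orientation, a minimal sketch of the API from its new home (the macro itself is untouched by this commit; only the types it expands to moved into `std::thread`):

```rust
use std::cell::Cell;
use std::thread;

// `thread_local!` is used exactly as before the move.
thread_local!(static COUNTER: Cell<u32> = Cell::new(0));

fn main() {
    COUNTER.with(|c| c.set(c.get() + 1));
    // Each thread starts with its own freshly initialized copy.
    thread::spawn(|| {
        COUNTER.with(|c| assert_eq!(c.get(), 0));
    }).join().ok().unwrap();
    COUNTER.with(|c| assert_eq!(c.get(), 1));
}
```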
Diffstat (limited to 'src/libstd/thread')
| file | insertions |
|---|---|
| src/libstd/thread/local.rs | 735 |
| src/libstd/thread/mod.rs | 1026 |
| src/libstd/thread/scoped.rs | 317 |
3 files changed, 2078 insertions, 0 deletions
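Before the diff itself, one orienting sketch: `local.rs` below introduces `LocalKeyState` together with an unstable `state()` query on `LocalKey`. The lifecycle, condensed from the `states` test in the diff (this assumes the 2015-era nightly gate named by the diff's `#[unstable(feature = "std_misc")]` attributes):

```rust
#![feature(std_misc)] // era-specific gate for the unstable `state()` query

use std::thread::{self, LocalKeyState};

struct Tracked;
impl Drop for Tracked {
    fn drop(&mut self) {
        // Destructors observe the `Destroyed` state.
        assert!(KEY.state() == LocalKeyState::Destroyed);
    }
}

thread_local!(static KEY: Tracked = Tracked);

fn main() {
    thread::spawn(|| {
        assert!(KEY.state() == LocalKeyState::Uninitialized);
        KEY.with(|_| {}); // first access runs the initializer
        assert!(KEY.state() == LocalKeyState::Valid);
    }).join().ok().unwrap();
}
```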
diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs new file mode 100644 index 00000000000..43142d2e5bc --- /dev/null +++ b/src/libstd/thread/local.rs @@ -0,0 +1,735 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Thread local storage + +#![unstable(feature = "thread_local_internals")] + +use prelude::v1::*; + +use cell::UnsafeCell; + +// Sure wish we had macro hygiene, no? +#[doc(hidden)] +#[unstable(feature = "thread_local_internals")] +pub mod __impl { + pub use super::imp::Key as KeyInner; + pub use super::imp::destroy_value; + pub use sys_common::thread_local::INIT_INNER as OS_INIT_INNER; + pub use sys_common::thread_local::StaticKey as OsStaticKey; +} + +/// A thread local storage key which owns its contents. +/// +/// This key uses the fastest possible implementation available to it for the +/// target platform. It is instantiated with the `thread_local!` macro and the +/// primary method is the `with` method. +/// +/// The `with` method yields a reference to the contained value which cannot be +/// sent across tasks or escape the given closure. +/// +/// # Initialization and Destruction +/// +/// Initialization is dynamically performed on the first call to `with()` +/// within a thread, and values support destructors which will be run when a +/// thread exits. +/// +/// # Examples +/// +/// ``` +/// use std::cell::RefCell; +/// use std::thread; +/// +/// thread_local!(static FOO: RefCell<u32> = RefCell::new(1)); +/// +/// FOO.with(|f| { +/// assert_eq!(*f.borrow(), 1); +/// *f.borrow_mut() = 2; +/// }); +/// +/// // each thread starts out with the initial value of 1 +/// thread::spawn(move|| { +/// FOO.with(|f| { +/// assert_eq!(*f.borrow(), 1); +/// *f.borrow_mut() = 3; +/// }); +/// }); +/// +/// // we retain our original value of 2 despite the child thread +/// FOO.with(|f| { +/// assert_eq!(*f.borrow(), 2); +/// }); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub struct LocalKey<T> { + // The key itself may be tagged with #[thread_local], and this `Key` is + // stored as a `static`, and it's not valid for a static to reference the + // address of another thread_local static. For this reason we kinda wonkily + // work around this by generating a shim function which will give us the + // address of the inner TLS key at runtime. + // + // This is trivially devirtualizable by LLVM because we never store anything + // to this field and rustc can declare the `static` as constant as well. + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub inner: fn() -> &'static __impl::KeyInner<UnsafeCell<Option<T>>>, + + // initialization routine to invoke to create a value + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub init: fn() -> T, +} + +/// Declare a new thread local storage key of type `std::thread::LocalKey`. +#[macro_export] +#[stable(feature = "rust1", since = "1.0.0")] +#[allow_internal_unstable] +macro_rules! 
thread_local { + (static $name:ident: $t:ty = $init:expr) => ( + static $name: ::std::thread::LocalKey<$t> = { + use std::cell::UnsafeCell as __UnsafeCell; + use std::thread::__local::__impl::KeyInner as __KeyInner; + use std::option::Option as __Option; + use std::option::Option::None as __None; + + __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = { + __UnsafeCell { value: __None } + }); + fn __init() -> $t { $init } + fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> { + &__KEY + } + ::std::thread::LocalKey { inner: __getit, init: __init } + }; + ); + (pub static $name:ident: $t:ty = $init:expr) => ( + pub static $name: ::std::thread::LocalKey<$t> = { + use std::cell::UnsafeCell as __UnsafeCell; + use std::thread::__local::__impl::KeyInner as __KeyInner; + use std::option::Option as __Option; + use std::option::Option::None as __None; + + __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = { + __UnsafeCell { value: __None } + }); + fn __init() -> $t { $init } + fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> { + &__KEY + } + ::std::thread::LocalKey { inner: __getit, init: __init } + }; + ); +} + +// Macro pain #4586: +// +// When cross compiling, rustc will load plugins and macros from the *host* +// platform before search for macros from the target platform. This is primarily +// done to detect, for example, plugins. Ideally the macro below would be +// defined once per module below, but unfortunately this means we have the +// following situation: +// +// 1. We compile libstd for x86_64-unknown-linux-gnu, this thread_local!() macro +// will inject #[thread_local] statics. +// 2. We then try to compile a program for arm-linux-androideabi +// 3. The compiler has a host of linux and a target of android, so it loads +// macros from the *linux* libstd. +// 4. The macro generates a #[thread_local] field, but the android libstd does +// not use #[thread_local] +// 5. Compile error about structs with wrong fields. +// +// To get around this, we're forced to inject the #[cfg] logic into the macro +// itself. Woohoo. + +#[macro_export] +#[doc(hidden)] +#[allow_internal_unstable] +macro_rules! 
__thread_local_inner { + (static $name:ident: $t:ty = $init:expr) => ( + #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), + not(target_arch = "aarch64")), + thread_local)] + static $name: ::std::thread::__local::__impl::KeyInner<$t> = + __thread_local_inner!($init, $t); + ); + (pub static $name:ident: $t:ty = $init:expr) => ( + #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), + not(target_arch = "aarch64")), + thread_local)] + pub static $name: ::std::thread::__local::__impl::KeyInner<$t> = + __thread_local_inner!($init, $t); + ); + ($init:expr, $t:ty) => ({ + #[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))] + const _INIT: ::std::thread::__local::__impl::KeyInner<$t> = { + ::std::thread::__local::__impl::KeyInner { + inner: ::std::cell::UnsafeCell { value: $init }, + dtor_registered: ::std::cell::UnsafeCell { value: false }, + dtor_running: ::std::cell::UnsafeCell { value: false }, + } + }; + + #[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))] + const _INIT: ::std::thread::__local::__impl::KeyInner<$t> = { + unsafe extern fn __destroy(ptr: *mut u8) { + ::std::thread::__local::__impl::destroy_value::<$t>(ptr); + } + + ::std::thread::__local::__impl::KeyInner { + inner: ::std::cell::UnsafeCell { value: $init }, + os: ::std::thread::__local::__impl::OsStaticKey { + inner: ::std::thread::__local::__impl::OS_INIT_INNER, + dtor: ::std::option::Option::Some(__destroy as unsafe extern fn(*mut u8)), + }, + } + }; + + _INIT + }); +} + +/// Indicator of the state of a thread local storage key. +#[unstable(feature = "std_misc", + reason = "state querying was recently added")] +#[derive(Eq, PartialEq, Copy)] +pub enum LocalKeyState { + /// All keys are in this state whenever a thread starts. Keys will + /// transition to the `Valid` state once the first call to `with` happens + /// and the initialization expression succeeds. + /// + /// Keys in the `Uninitialized` state will yield a reference to the closure + /// passed to `with` so long as the initialization routine does not panic. + Uninitialized, + + /// Once a key has been accessed successfully, it will enter the `Valid` + /// state. Keys in the `Valid` state will remain so until the thread exits, + /// at which point the destructor will be run and the key will enter the + /// `Destroyed` state. + /// + /// Keys in the `Valid` state will be guaranteed to yield a reference to the + /// closure passed to `with`. + Valid, + + /// When a thread exits, the destructors for keys will be run (if + /// necessary). While a destructor is running, and possibly after a + /// destructor has run, a key is in the `Destroyed` state. + /// + /// Keys in the `Destroyed` states will trigger a panic when accessed via + /// `with`. + Destroyed, +} + +impl<T: 'static> LocalKey<T> { + /// Acquire a reference to the value in this TLS key. + /// + /// This will lazily initialize the value if this thread has not referenced + /// this key yet. + /// + /// # Panics + /// + /// This function will `panic!()` if the key currently has its + /// destructor running, and it **may** panic if the destructor has + /// previously been run for this thread. 
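The panic documented just above is most easily hit when one key's destructor touches another key that has already been torn down. A hedged sketch (the key names are illustrative, and destruction order between keys is unspecified, so the panic is not guaranteed on any particular run):

```rust
use std::thread;

struct Late;
impl Drop for Late {
    fn drop(&mut self) {
        // If OTHER was destroyed before LATE on this thread, this call
        // hits the "cannot access a TLS value during or after it is
        // destroyed" panic from `with`.
        let _ = OTHER.with(|v| *v);
    }
}

thread_local!(static LATE: Late = Late);
thread_local!(static OTHER: u32 = 5);

fn main() {
    thread::spawn(|| {
        LATE.with(|_| {});
        OTHER.with(|_| {});
    }).join().ok().unwrap();
}
```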
+ #[stable(feature = "rust1", since = "1.0.0")] + pub fn with<F, R>(&'static self, f: F) -> R + where F: FnOnce(&T) -> R { + let slot = (self.inner)(); + unsafe { + let slot = slot.get().expect("cannot access a TLS value during or \ + after it is destroyed"); + f(match *slot.get() { + Some(ref inner) => inner, + None => self.init(slot), + }) + } + } + + unsafe fn init(&self, slot: &UnsafeCell<Option<T>>) -> &T { + // Execute the initialization up front, *then* move it into our slot, + // just in case initialization fails. + let value = (self.init)(); + let ptr = slot.get(); + *ptr = Some(value); + (*ptr).as_ref().unwrap() + } + + /// Query the current state of this key. + /// + /// A key is initially in the `Uninitialized` state whenever a thread + /// starts. It will remain in this state up until the first call to `with` + /// within a thread has run the initialization expression successfully. + /// + /// Once the initialization expression succeeds, the key transitions to the + /// `Valid` state which will guarantee that future calls to `with` will + /// succeed within the thread. + /// + /// When a thread exits, each key will be destroyed in turn, and as keys are + /// destroyed they will enter the `Destroyed` state just before the + /// destructor starts to run. Keys may remain in the `Destroyed` state after + /// destruction has completed. Keys without destructors (e.g. with types + /// that are `Copy`), may never enter the `Destroyed` state. + /// + /// Keys in the `Uninitialized` can be accessed so long as the + /// initialization does not panic. Keys in the `Valid` state are guaranteed + /// to be able to be accessed. Keys in the `Destroyed` state will panic on + /// any call to `with`. + #[unstable(feature = "std_misc", + reason = "state querying was recently added")] + pub fn state(&'static self) -> LocalKeyState { + unsafe { + match (self.inner)().get() { + Some(cell) => { + match *cell.get() { + Some(..) => LocalKeyState::Valid, + None => LocalKeyState::Uninitialized, + } + } + None => LocalKeyState::Destroyed, + } + } + } + + /// Deprecated + #[unstable(feature = "std_misc")] + #[deprecated(since = "1.0.0", + reason = "function renamed to state() and returns more info")] + pub fn destroyed(&'static self) -> bool { self.state() == LocalKeyState::Destroyed } +} + +#[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))] +mod imp { + use prelude::v1::*; + + use cell::UnsafeCell; + use intrinsics; + use ptr; + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub struct Key<T> { + // Place the inner bits in an `UnsafeCell` to currently get around the + // "only Sync statics" restriction. This allows any type to be placed in + // the cell. + // + // Note that all access requires `T: 'static` so it can't be a type with + // any borrowed pointers still. + #[unstable(feature = "thread_local_internals")] + pub inner: UnsafeCell<T>, + + // Metadata to keep track of the state of the destructor. Remember that + // these variables are thread-local, not global. 
+ #[unstable(feature = "thread_local_internals")] + pub dtor_registered: UnsafeCell<bool>, // should be Cell + #[unstable(feature = "thread_local_internals")] + pub dtor_running: UnsafeCell<bool>, // should be Cell + } + + unsafe impl<T> ::marker::Sync for Key<T> { } + + #[doc(hidden)] + impl<T> Key<T> { + pub unsafe fn get(&'static self) -> Option<&'static T> { + if intrinsics::needs_drop::<T>() && *self.dtor_running.get() { + return None + } + self.register_dtor(); + Some(&*self.inner.get()) + } + + unsafe fn register_dtor(&self) { + if !intrinsics::needs_drop::<T>() || *self.dtor_registered.get() { + return + } + + register_dtor(self as *const _ as *mut u8, + destroy_value::<T>); + *self.dtor_registered.get() = true; + } + } + + // Since what appears to be glibc 2.18 this symbol has been shipped which + // GCC and clang both use to invoke destructors in thread_local globals, so + // let's do the same! + // + // Note, however, that we run on lots older linuxes, as well as cross + // compiling from a newer linux to an older linux, so we also have a + // fallback implementation to use as well. + // + // Due to rust-lang/rust#18804, make sure this is not generic! + #[cfg(target_os = "linux")] + unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) { + use boxed; + use mem; + use libc; + use sys_common::thread_local as os; + + extern { + static __dso_handle: *mut u8; + #[linkage = "extern_weak"] + static __cxa_thread_atexit_impl: *const (); + } + if !__cxa_thread_atexit_impl.is_null() { + type F = unsafe extern fn(dtor: unsafe extern fn(*mut u8), + arg: *mut u8, + dso_handle: *mut u8) -> libc::c_int; + mem::transmute::<*const (), F>(__cxa_thread_atexit_impl) + (dtor, t, __dso_handle); + return + } + + // The fallback implementation uses a vanilla OS-based TLS key to track + // the list of destructors that need to be run for this thread. The key + // then has its own destructor which runs all the other destructors. + // + // The destructor for DTORS is a little special in that it has a `while` + // loop to continuously drain the list of registered destructors. It + // *should* be the case that this loop always terminates because we + // provide the guarantee that a TLS key cannot be set after it is + // flagged for destruction. + static DTORS: os::StaticKey = os::StaticKey { + inner: os::INIT_INNER, + dtor: Some(run_dtors as unsafe extern "C" fn(*mut u8)), + }; + type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>; + if DTORS.get().is_null() { + let v: Box<List> = box Vec::new(); + DTORS.set(boxed::into_raw(v) as *mut u8); + } + let list: &mut List = &mut *(DTORS.get() as *mut List); + list.push((t, dtor)); + + unsafe extern fn run_dtors(mut ptr: *mut u8) { + while !ptr.is_null() { + let list: Box<List> = Box::from_raw(ptr as *mut List); + for &(ptr, dtor) in &*list { + dtor(ptr); + } + ptr = DTORS.get(); + DTORS.set(ptr::null_mut()); + } + } + } + + // OSX's analog of the above linux function is this _tlv_atexit function. + // The disassembly of thread_local globals in C++ (at least produced by + // clang) will have this show up in the output. 
+ #[cfg(target_os = "macos")] + unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) { + extern { + fn _tlv_atexit(dtor: unsafe extern fn(*mut u8), + arg: *mut u8); + } + _tlv_atexit(dtor, t); + } + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub unsafe extern fn destroy_value<T>(ptr: *mut u8) { + let ptr = ptr as *mut Key<T>; + // Right before we run the user destructor be sure to flag the + // destructor as running for this thread so calls to `get` will return + // `None`. + *(*ptr).dtor_running.get() = true; + ptr::read((*ptr).inner.get()); + } +} + +#[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))] +mod imp { + use prelude::v1::*; + + use alloc::boxed; + use cell::UnsafeCell; + use mem; + use ptr; + use sys_common::thread_local::StaticKey as OsStaticKey; + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub struct Key<T> { + // Statically allocated initialization expression, using an `UnsafeCell` + // for the same reasons as above. + #[unstable(feature = "thread_local_internals")] + pub inner: UnsafeCell<T>, + + // OS-TLS key that we'll use to key off. + #[unstable(feature = "thread_local_internals")] + pub os: OsStaticKey, + } + + unsafe impl<T> ::marker::Sync for Key<T> { } + + struct Value<T: 'static> { + key: &'static Key<T>, + value: T, + } + + #[doc(hidden)] + impl<T> Key<T> { + pub unsafe fn get(&'static self) -> Option<&'static T> { + self.ptr().map(|p| &*p) + } + + unsafe fn ptr(&'static self) -> Option<*mut T> { + let ptr = self.os.get() as *mut Value<T>; + if !ptr.is_null() { + if ptr as usize == 1 { + return None + } + return Some(&mut (*ptr).value as *mut T); + } + + // If the lookup returned null, we haven't initialized our own local + // copy, so do that now. + // + // Also note that this transmute_copy should be ok because the value + // `inner` is already validated to be a valid `static` value, so we + // should be able to freely copy the bits. + let ptr: Box<Value<T>> = box Value { + key: self, + value: mem::transmute_copy(&self.inner), + }; + let ptr: *mut Value<T> = boxed::into_raw(ptr); + self.os.set(ptr as *mut u8); + Some(&mut (*ptr).value as *mut T) + } + } + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub unsafe extern fn destroy_value<T: 'static>(ptr: *mut u8) { + // The OS TLS ensures that this key contains a NULL value when this + // destructor starts to run. We set it back to a sentinel value of 1 to + // ensure that any future calls to `get` for this thread will return + // `None`. + // + // Note that to prevent an infinite loop we reset it back to null right + // before we return from the destructor ourselves. 
+ let ptr: Box<Value<T>> = Box::from_raw(ptr as *mut Value<T>); + let key = ptr.key; + key.os.set(1 as *mut u8); + drop(ptr); + key.os.set(ptr::null_mut()); + } +} + +#[cfg(test)] +mod tests { + use prelude::v1::*; + + use sync::mpsc::{channel, Sender}; + use cell::UnsafeCell; + use super::LocalKeyState; + use thread; + + struct Foo(Sender<()>); + + impl Drop for Foo { + fn drop(&mut self) { + let Foo(ref s) = *self; + s.send(()).unwrap(); + } + } + + #[test] + fn smoke_no_dtor() { + thread_local!(static FOO: UnsafeCell<i32> = UnsafeCell { value: 1 }); + + FOO.with(|f| unsafe { + assert_eq!(*f.get(), 1); + *f.get() = 2; + }); + let (tx, rx) = channel(); + let _t = thread::spawn(move|| { + FOO.with(|f| unsafe { + assert_eq!(*f.get(), 1); + }); + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + + FOO.with(|f| unsafe { + assert_eq!(*f.get(), 2); + }); + } + + #[test] + fn states() { + struct Foo; + impl Drop for Foo { + fn drop(&mut self) { + assert!(FOO.state() == LocalKeyState::Destroyed); + } + } + fn foo() -> Foo { + assert!(FOO.state() == LocalKeyState::Uninitialized); + Foo + } + thread_local!(static FOO: Foo = foo()); + + thread::spawn(|| { + assert!(FOO.state() == LocalKeyState::Uninitialized); + FOO.with(|_| { + assert!(FOO.state() == LocalKeyState::Valid); + }); + assert!(FOO.state() == LocalKeyState::Valid); + }).join().ok().unwrap(); + } + + #[test] + fn smoke_dtor() { + thread_local!(static FOO: UnsafeCell<Option<Foo>> = UnsafeCell { + value: None + }); + + let (tx, rx) = channel(); + let _t = thread::spawn(move|| unsafe { + let mut tx = Some(tx); + FOO.with(|f| { + *f.get() = Some(Foo(tx.take().unwrap())); + }); + }); + rx.recv().unwrap(); + } + + #[test] + fn circular() { + struct S1; + struct S2; + thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell { + value: None + }); + thread_local!(static K2: UnsafeCell<Option<S2>> = UnsafeCell { + value: None + }); + static mut HITS: u32 = 0; + + impl Drop for S1 { + fn drop(&mut self) { + unsafe { + HITS += 1; + if K2.state() == LocalKeyState::Destroyed { + assert_eq!(HITS, 3); + } else { + if HITS == 1 { + K2.with(|s| *s.get() = Some(S2)); + } else { + assert_eq!(HITS, 3); + } + } + } + } + } + impl Drop for S2 { + fn drop(&mut self) { + unsafe { + HITS += 1; + assert!(K1.state() != LocalKeyState::Destroyed); + assert_eq!(HITS, 2); + K1.with(|s| *s.get() = Some(S1)); + } + } + } + + thread::spawn(move|| { + drop(S1); + }).join().ok().unwrap(); + } + + #[test] + fn self_referential() { + struct S1; + thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell { + value: None + }); + + impl Drop for S1 { + fn drop(&mut self) { + assert!(K1.state() == LocalKeyState::Destroyed); + } + } + + thread::spawn(move|| unsafe { + K1.with(|s| *s.get() = Some(S1)); + }).join().ok().unwrap(); + } + + #[test] + fn dtors_in_dtors_in_dtors() { + struct S1(Sender<()>); + thread_local!(static K1: UnsafeCell<Option<S1>> = UnsafeCell { + value: None + }); + thread_local!(static K2: UnsafeCell<Option<Foo>> = UnsafeCell { + value: None + }); + + impl Drop for S1 { + fn drop(&mut self) { + let S1(ref tx) = *self; + unsafe { + if K2.state() != LocalKeyState::Destroyed { + K2.with(|s| *s.get() = Some(Foo(tx.clone()))); + } + } + } + } + + let (tx, rx) = channel(); + let _t = thread::spawn(move|| unsafe { + let mut tx = Some(tx); + K1.with(|s| *s.get() = Some(S1(tx.take().unwrap()))); + }); + rx.recv().unwrap(); + } +} + +#[cfg(test)] +mod dynamic_tests { + use prelude::v1::*; + + use cell::RefCell; + use collections::HashMap; + + #[test] + fn 
smoke() { + fn square(i: i32) -> i32 { i * i } + thread_local!(static FOO: i32 = square(3)); + + FOO.with(|f| { + assert_eq!(*f, 9); + }); + } + + #[test] + fn hashmap() { + fn map() -> RefCell<HashMap<i32, i32>> { + let mut m = HashMap::new(); + m.insert(1, 2); + RefCell::new(m) + } + thread_local!(static FOO: RefCell<HashMap<i32, i32>> = map()); + + FOO.with(|map| { + assert_eq!(map.borrow()[1], 2); + }); + } + + #[test] + fn refcell_vec() { + thread_local!(static FOO: RefCell<Vec<u32>> = RefCell::new(vec![1, 2, 3])); + + FOO.with(|vec| { + assert_eq!(vec.borrow().len(), 3); + vec.borrow_mut().push(4); + assert_eq!(vec.borrow()[3], 4); + }); + } +} diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs new file mode 100644 index 00000000000..57baeb1fb74 --- /dev/null +++ b/src/libstd/thread/mod.rs @@ -0,0 +1,1026 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Native threads +//! +//! ## The threading model +//! +//! An executing Rust program consists of a collection of native OS threads, +//! each with their own stack and local state. +//! +//! Communication between threads can be done through +//! [channels](../../std/sync/mpsc/index.html), Rust's message-passing +//! types, along with [other forms of thread +//! synchronization](../../std/sync/index.html) and shared-memory data +//! structures. In particular, types that are guaranteed to be +//! threadsafe are easily shared between threads using the +//! atomically-reference-counted container, +//! [`Arc`](../../std/sync/struct.Arc.html). +//! +//! Fatal logic errors in Rust cause *thread panic*, during which +//! a thread will unwind the stack, running destructors and freeing +//! owned resources. Thread panic is unrecoverable from within +//! the panicking thread (i.e. there is no 'try/catch' in Rust), but +//! the panic may optionally be detected from a different thread. If +//! the main thread panics, the application will exit with a non-zero +//! exit code. +//! +//! When the main thread of a Rust program terminates, the entire program shuts +//! down, even if other threads are still running. However, this module provides +//! convenient facilities for automatically waiting for the termination of a +//! child thread (i.e., join). +//! +//! ## The `Thread` type +//! +//! Threads are represented via the `Thread` type, which you can +//! get in one of two ways: +//! +//! * By spawning a new thread, e.g. using the `thread::spawn` function. +//! * By requesting the current thread, using the `thread::current` function. +//! +//! Threads can be named, and provide some built-in support for low-level +//! synchronization (described below). +//! +//! The `thread::current()` function is available even for threads not spawned +//! by the APIs of this module. +//! +//! ## Spawning a thread +//! +//! A new thread can be spawned using the `thread::spawn` function: +//! +//! ```rust +//! use std::thread; +//! +//! thread::spawn(move || { +//! // some work here +//! }); +//! ``` +//! +//! In this example, the spawned thread is "detached" from the current +//! thread. 
This means that it can outlive its parent (the thread that spawned +//! it), unless this parent is the main thread. +//! +//! ## Scoped threads +//! +//! Often a parent thread uses a child thread to perform some particular task, +//! and at some point must wait for the child to complete before continuing. +//! For this scenario, use the `thread::scoped` function: +//! +//! ```rust +//! use std::thread; +//! +//! let guard = thread::scoped(move || { +//! // some work here +//! }); +//! +//! // do some other work in the meantime +//! let output = guard.join(); +//! ``` +//! +//! The `scoped` function doesn't return a `Thread` directly; instead, +//! it returns a *join guard*. The join guard is an RAII-style guard +//! that will automatically join the child thread (block until it +//! terminates) when it is dropped. You can join the child thread in +//! advance by calling the `join` method on the guard, which will also +//! return the result produced by the thread. A handle to the thread +//! itself is available via the `thread` method of the join guard. +//! +//! ## Configuring threads +//! +//! A new thread can be configured before it is spawned via the `Builder` type, +//! which currently allows you to set the name, stack size, and writers for +//! `println!` and `panic!` for the child thread: +//! +//! ```rust +//! use std::thread; +//! +//! thread::Builder::new().name("child1".to_string()).spawn(move || { +//! println!("Hello, world!"); +//! }); +//! ``` +//! +//! ## Blocking support: park and unpark +//! +//! Every thread is equipped with some basic low-level blocking support, via the +//! `park` and `unpark` functions. +//! +//! Conceptually, each `Thread` handle has an associated token, which is +//! initially not present: +//! +//! * The `thread::park()` function blocks the current thread unless or until +//! the token is available for its thread handle, at which point it atomically +//! consumes the token. It may also return *spuriously*, without consuming the +//! token. `thread::park_timeout()` does the same, but allows specifying a +//! maximum time to block the thread for. +//! +//! * The `unpark()` method on a `Thread` atomically makes the token available +//! if it wasn't already. +//! +//! In other words, each `Thread` acts a bit like a semaphore with initial count +//! 0, except that the semaphore is *saturating* (the count cannot go above 1), +//! and can return spuriously. +//! +//! The API is typically used by acquiring a handle to the current thread, +//! placing that handle in a shared data structure so that other threads can +//! find it, and then `park`ing. When some desired condition is met, another +//! thread calls `unpark` on the handle. +//! +//! The motivation for this design is twofold: +//! +//! * It avoids the need to allocate mutexes and condvars when building new +//! synchronization primitives; the threads already provide basic blocking/signaling. +//! +//! * It can be implemented very efficiently on many platforms. +//! +//! ## Thread-local storage +//! +//! This module also provides an implementation of thread local storage for Rust +//! programs. Thread local storage is a method of storing data into a global +//! variable which each thread in the program will have its own copy of. +//! Threads do not share this data, so accesses do not need to be synchronized. +//! +//! At a high level, this module provides two variants of storage: +//! +//! * Owned thread-local storage. This is a type of thread local key which +//! 
owns the value that it contains, and will destroy the value when the +//! thread exits. This variant is created with the `thread_local!` macro and +//! can contain any value which is `'static` (no borrowed pointers). +//! +//! * Scoped thread-local storage. This type of key is used to store a reference +//! to a value into local storage temporarily for the scope of a function +//! call. There are no restrictions on what types of values can be placed +//! into this key. +//! +//! Both forms of thread local storage provide an accessor function, `with`, +//! which will yield a shared reference to the value to the specified +//! closure. Thread-local keys only allow shared access to values as there is no +//! way to guarantee uniqueness if a mutable borrow was allowed. Most values +//! will want to make use of some form of **interior mutability** through the +//! `Cell` or `RefCell` types. + +#![stable(feature = "rust1", since = "1.0.0")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::__local::{LocalKey, LocalKeyState}; + +#[unstable(feature = "scoped_tls", + reason = "scoped TLS has yet to have wide enough use to fully consider \ + stabilizing its interface")] +pub use self::__scoped::ScopedKey; + +use prelude::v1::*; + +use any::Any; +use cell::UnsafeCell; +use fmt; +use io; +use marker::PhantomData; +use rt::{self, unwind}; +use sync::{Mutex, Condvar, Arc}; +use sys::thread as imp; +use sys_common::{stack, thread_info}; +use thunk::Thunk; +use time::Duration; + +#[allow(deprecated)] use old_io::Writer; + +//////////////////////////////////////////////////////////////////////////////// +// Thread-local storage +//////////////////////////////////////////////////////////////////////////////// + +#[macro_use] +#[doc(hidden)] +#[path = "local.rs"] pub mod __local; + +#[macro_use] +#[doc(hidden)] +#[path = "scoped.rs"] pub mod __scoped; + +//////////////////////////////////////////////////////////////////////////////// +// Builder +//////////////////////////////////////////////////////////////////////////////// + +/// Thread configuration. Provides detailed control over the properties +/// and behavior of new threads. +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Builder { + // A name for the thread-to-be, for identification in panic messages + name: Option<String>, + // The size of the stack for the spawned thread + stack_size: Option<usize>, +} + +impl Builder { + /// Generate the base configuration for spawning a thread, from which + /// configuration methods can be chained. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn new() -> Builder { + Builder { + name: None, + stack_size: None, + } + } + + /// Name the thread-to-be. Currently the name is used for identification + /// only in panic messages. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn name(mut self, name: String) -> Builder { + self.name = Some(name); + self + } + + /// Set the size of the stack for the new thread. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn stack_size(mut self, size: usize) -> Builder { + self.stack_size = Some(size); + self + } + + /// Redirect thread-local stdout. + #[unstable(feature = "std_misc", + reason = "Will likely go away after proc removal")] + #[deprecated(since = "1.0.0", + reason = "the old I/O module is deprecated and this function \ + will be removed with no replacement")] + #[allow(deprecated)] + pub fn stdout(self, _stdout: Box<Writer + Send + 'static>) -> Builder { + self + } + + /// Redirect thread-local stderr. 
+ #[unstable(feature = "std_misc", + reason = "Will likely go away after proc removal")] + #[deprecated(since = "1.0.0", + reason = "the old I/O module is deprecated and this function \ + will be removed with no replacement")] + #[allow(deprecated)] + pub fn stderr(self, _stderr: Box<Writer + Send + 'static>) -> Builder { + self + } + + /// Spawn a new thread, and return a join handle for it. + /// + /// The child thread may outlive the parent (unless the parent thread + /// is the main thread; the whole process is terminated when the main + /// thread finishes.) The join handle can be used to block on + /// termination of the child thread, including recovering its panics. + /// + /// # Errors + /// + /// Unlike the `spawn` free function, this method yields an + /// `io::Result` to capture any failure to create the thread at + /// the OS level. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn spawn<F>(self, f: F) -> io::Result<JoinHandle> where + F: FnOnce(), F: Send + 'static + { + self.spawn_inner(Thunk::new(f)).map(|i| JoinHandle(i)) + } + + /// Spawn a new child thread that must be joined within a given + /// scope, and return a `JoinGuard`. + /// + /// The join guard can be used to explicitly join the child thread (via + /// `join`), returning `Result<T>`, or it will implicitly join the child + /// upon being dropped. Because the child thread may refer to data on the + /// current thread's stack (hence the "scoped" name), it cannot be detached; + /// it *must* be joined before the relevant stack frame is popped. See the + /// module documentation for additional details. + /// + /// # Errors + /// + /// Unlike the `scoped` free function, this method yields an + /// `io::Result` to capture any failure to create the thread at + /// the OS level. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn scoped<'a, T, F>(self, f: F) -> io::Result<JoinGuard<'a, T>> where + T: Send + 'a, F: FnOnce() -> T, F: Send + 'a + { + self.spawn_inner(Thunk::new(f)).map(|inner| { + JoinGuard { inner: inner, _marker: PhantomData } + }) + } + + fn spawn_inner<T: Send>(self, f: Thunk<(), T>) -> io::Result<JoinInner<T>> { + let Builder { name, stack_size } = self; + + let stack_size = stack_size.unwrap_or(rt::min_stack()); + + let my_thread = Thread::new(name); + let their_thread = my_thread.clone(); + + let my_packet = Packet(Arc::new(UnsafeCell::new(None))); + let their_packet = Packet(my_packet.0.clone()); + + // Spawning a new OS thread guarantees that __morestack will never get + // triggered, but we must manually set up the actual stack bounds once + // this function starts executing. This raises the lower limit by a bit + // because by the time that this function is executing we've already + // consumed at least a little bit of stack (we don't know the exact byte + // address at which our stack started). + let main = move || { + let something_around_the_top_of_the_stack = 1; + let addr = &something_around_the_top_of_the_stack as *const i32; + let my_stack_top = addr as usize; + let my_stack_bottom = my_stack_top - stack_size + 1024; + unsafe { + if let Some(name) = their_thread.name() { + imp::set_name(name); + } + stack::record_os_managed_stack_bounds(my_stack_bottom, + my_stack_top); + thread_info::set(imp::guard::current(), their_thread); + } + + let mut output = None; + let try_result = { + let ptr = &mut output; + + // There are two primary reasons that general try/catch is + // unsafe. The first is that we do not support nested + // try/catch. 
The fact that this is happening in a newly-spawned + // thread suffices. The second is that unwinding while unwinding + // is not defined. We take care of that by having an + // 'unwinding' flag in the thread itself. For these reasons, + // this unsafety should be ok. + unsafe { + unwind::try(move || *ptr = Some(f.invoke(()))) + } + }; + unsafe { + *their_packet.0.get() = Some(match (output, try_result) { + (Some(data), Ok(_)) => Ok(data), + (None, Err(cause)) => Err(cause), + _ => unreachable!() + }); + } + }; + + Ok(JoinInner { + native: try!(unsafe { imp::create(stack_size, Thunk::new(main)) }), + thread: my_thread, + packet: my_packet, + joined: false, + }) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Free functions +//////////////////////////////////////////////////////////////////////////////// + +/// Spawn a new thread, returning a `JoinHandle` for it. +/// +/// The join handle will implicitly *detach* the child thread upon being +/// dropped. In this case, the child thread may outlive the parent (unless +/// the parent thread is the main thread; the whole process is terminated when +/// the main thread finishes.) Additionally, the join handle provides a `join` +/// method that can be used to join the child thread. If the child thread +/// panics, `join` will return an `Err` containing the argument given to +/// `panic`. +/// +/// # Panics +/// +/// Panicks if the OS fails to create a thread; use `Builder::spawn` +/// to recover from such errors. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn spawn<F>(f: F) -> JoinHandle where F: FnOnce(), F: Send + 'static { + Builder::new().spawn(f).unwrap() +} + +/// Spawn a new *scoped* thread, returning a `JoinGuard` for it. +/// +/// The join guard can be used to explicitly join the child thread (via +/// `join`), returning `Result<T>`, or it will implicitly join the child +/// upon being dropped. Because the child thread may refer to data on the +/// current thread's stack (hence the "scoped" name), it cannot be detached; +/// it *must* be joined before the relevant stack frame is popped. See the +/// module documentation for additional details. +/// +/// # Panics +/// +/// Panicks if the OS fails to create a thread; use `Builder::scoped` +/// to recover from such errors. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn scoped<'a, T, F>(f: F) -> JoinGuard<'a, T> where + T: Send + 'a, F: FnOnce() -> T, F: Send + 'a +{ + Builder::new().scoped(f).unwrap() +} + +/// Gets a handle to the thread that invokes it. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn current() -> Thread { + thread_info::current_thread() +} + +/// Cooperatively give up a timeslice to the OS scheduler. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn yield_now() { + unsafe { imp::yield_now() } +} + +/// Determines whether the current thread is unwinding because of panic. +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +pub fn panicking() -> bool { + unwind::panicking() +} + +/// Put the current thread to sleep for the specified amount of time. +/// +/// The thread may sleep longer than the duration specified due to scheduling +/// specifics or platform-dependent functionality. Note that on unix platforms +/// this function will not return early due to a signal being received or a +/// spurious wakeup. 
+#[unstable(feature = "thread_sleep", + reason = "recently added, needs an RFC, and `Duration` itself is \ + unstable")] +pub fn sleep(dur: Duration) { + imp::sleep(dur) +} + +/// Block unless or until the current thread's token is made available (may wake spuriously). +/// +/// See the module doc for more detail. +// +// The implementation currently uses the trivial strategy of a Mutex+Condvar +// with wakeup flag, which does not actually allow spurious wakeups. In the +// future, this will be implemented in a more efficient way, perhaps along the lines of +// http://cr.openjdk.java.net/~stefank/6989984.1/raw_files/new/src/os/linux/vm/os_linux.cpp +// or futuxes, and in either case may allow spurious wakeups. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn park() { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + while !*guard { + guard = thread.inner.cvar.wait(guard).unwrap(); + } + *guard = false; +} + +/// Block unless or until the current thread's token is made available or +/// the specified duration has been reached (may wake spuriously). +/// +/// The semantics of this function are equivalent to `park()` except that the +/// thread will be blocked for roughly no longer than *duration*. This method +/// should not be used for precise timing due to anomalies such as +/// preemption or platform differences that may not cause the maximum +/// amount of time waited to be precisely *duration* long. +/// +/// See the module doc for more detail. +#[unstable(feature = "std_misc", reason = "recently introduced, depends on Duration")] +pub fn park_timeout(duration: Duration) { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + if !*guard { + let (g, _) = thread.inner.cvar.wait_timeout(guard, duration).unwrap(); + guard = g; + } + *guard = false; +} + +//////////////////////////////////////////////////////////////////////////////// +// Thread +//////////////////////////////////////////////////////////////////////////////// + +/// The internal representation of a `Thread` handle +struct Inner { + name: Option<String>, + lock: Mutex<bool>, // true when there is a buffered unpark + cvar: Condvar, +} + +unsafe impl Sync for Inner {} + +#[derive(Clone)] +#[stable(feature = "rust1", since = "1.0.0")] +/// A handle to a thread. +pub struct Thread { + inner: Arc<Inner>, +} + +impl Thread { + // Used only internally to construct a thread object without spawning + fn new(name: Option<String>) -> Thread { + Thread { + inner: Arc::new(Inner { + name: name, + lock: Mutex::new(false), + cvar: Condvar::new(), + }) + } + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", + reason = "may change with specifics of new Send semantics")] + pub fn spawn<F>(f: F) -> Thread where F: FnOnce(), F: Send + 'static { + Builder::new().spawn(f).unwrap().thread().clone() + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", + reason = "may change with specifics of new Send semantics")] + pub fn scoped<'a, T, F>(f: F) -> JoinGuard<'a, T> where + T: Send + 'a, F: FnOnce() -> T, F: Send + 'a + { + Builder::new().scoped(f).unwrap() + } + + /// Deprecated: use module-level free function. 
+ #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn current() -> Thread { + thread_info::current_thread() + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", reason = "name may change")] + pub fn yield_now() { + unsafe { imp::yield_now() } + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn panicking() -> bool { + unwind::panicking() + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", reason = "recently introduced")] + pub fn park() { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + while !*guard { + guard = thread.inner.cvar.wait(guard).unwrap(); + } + *guard = false; + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", reason = "recently introduced")] + pub fn park_timeout(duration: Duration) { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + if !*guard { + let (g, _) = thread.inner.cvar.wait_timeout(guard, duration).unwrap(); + guard = g; + } + *guard = false; + } + + /// Atomically makes the handle's token available if it is not already. + /// + /// See the module doc for more detail. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn unpark(&self) { + let mut guard = self.inner.lock.lock().unwrap(); + if !*guard { + *guard = true; + self.inner.cvar.notify_one(); + } + } + + /// Get the thread's name. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn name(&self) -> Option<&str> { + self.inner.name.as_ref().map(|s| &**s) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Thread { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.name(), f) + } +} + +// a hack to get around privacy restrictions +impl thread_info::NewThread for Thread { + fn new(name: Option<String>) -> Thread { Thread::new(name) } +} + +//////////////////////////////////////////////////////////////////////////////// +// JoinHandle and JoinGuard +//////////////////////////////////////////////////////////////////////////////// + +/// Indicates the manner in which a thread exited. +/// +/// A thread that completes without panicking is considered to exit successfully. +#[stable(feature = "rust1", since = "1.0.0")] +pub type Result<T> = ::result::Result<T, Box<Any + Send + 'static>>; + +struct Packet<T>(Arc<UnsafeCell<Option<Result<T>>>>); + +unsafe impl<T:Send> Send for Packet<T> {} +unsafe impl<T> Sync for Packet<T> {} + +/// Inner representation for JoinHandle and JoinGuard +struct JoinInner<T> { + native: imp::rust_thread, + thread: Thread, + packet: Packet<T>, + joined: bool, +} + +impl<T> JoinInner<T> { + fn join(&mut self) -> Result<T> { + assert!(!self.joined); + unsafe { imp::join(self.native) }; + self.joined = true; + unsafe { + (*self.packet.0.get()).take().unwrap() + } + } +} + +/// An owned permission to join on a thread (block on its termination). +/// +/// Unlike a `JoinGuard`, a `JoinHandle` *detaches* the child thread +/// when it is dropped, rather than automatically joining on drop. 
+/// +/// Due to platform restrictions, it is not possible to `Clone` this +/// handle: the ability to join a child thread is a uniquely-owned +/// permission. +#[stable(feature = "rust1", since = "1.0.0")] +pub struct JoinHandle(JoinInner<()>); + +impl JoinHandle { + /// Extract a handle to the underlying thread + #[stable(feature = "rust1", since = "1.0.0")] + pub fn thread(&self) -> &Thread { + &self.0.thread + } + + /// Wait for the associated thread to finish. + /// + /// If the child thread panics, `Err` is returned with the parameter given + /// to `panic`. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn join(mut self) -> Result<()> { + self.0.join() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Drop for JoinHandle { + fn drop(&mut self) { + if !self.0.joined { + unsafe { imp::detach(self.0.native) } + } + } +} + +/// An RAII-style guard that will block until thread termination when dropped. +/// +/// The type `T` is the return type for the thread's main function. +/// +/// Joining on drop is necessary to ensure memory safety when stack +/// data is shared between a parent and child thread. +/// +/// Due to platform restrictions, it is not possible to `Clone` this +/// handle: the ability to join a child thread is a uniquely-owned +/// permission. +#[must_use = "thread will be immediately joined if `JoinGuard` is not used"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct JoinGuard<'a, T: 'a> { + inner: JoinInner<T>, + _marker: PhantomData<&'a T>, +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<'a, T: Send + 'a> Sync for JoinGuard<'a, T> {} + +impl<'a, T: Send + 'a> JoinGuard<'a, T> { + /// Extract a handle to the thread this guard will join on. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn thread(&self) -> &Thread { + &self.inner.thread + } + + /// Wait for the associated thread to finish, returning the result of the thread's + /// calculation. + /// + /// # Panics + /// + /// Panics on the child thread are propagated by panicking the parent. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn join(mut self) -> T { + match self.inner.join() { + Ok(res) => res, + Err(_) => panic!("child thread {:?} panicked", self.thread()), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl<T: Send> JoinGuard<'static, T> { + /// Detaches the child thread, allowing it to outlive its parent. + #[deprecated(since = "1.0.0", reason = "use spawn instead")] + #[unstable(feature = "std_misc")] + pub fn detach(mut self) { + unsafe { imp::detach(self.inner.native) }; + self.inner.joined = true; // avoid joining in the destructor + } +} + +#[unsafe_destructor] +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T: Send + 'a> Drop for JoinGuard<'a, T> { + fn drop(&mut self) { + if !self.inner.joined { + if self.inner.join().is_err() { + panic!("child thread {:?} panicked", self.thread()); + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod test { + use prelude::v1::*; + + use any::Any; + use sync::mpsc::{channel, Sender}; + use boxed::BoxAny; + use result; + use std::old_io::{ChanReader, ChanWriter}; + use super::{Builder}; + use thread; + use thunk::Thunk; + use time::Duration; + + // !!! These tests are dangerous. If something is buggy, they will hang, !!! + // !!! instead of exiting cleanly. This might wedge the buildbots. !!! 
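Before the tests, the park/unpark token protocol from the module docs in miniature (a sketch; real code should re-check its condition in a loop, since `park` is allowed to wake spuriously):

```rust
use std::sync::mpsc::channel;
use std::thread;

fn main() {
    let (tx, rx) = channel();
    let waiter = thread::spawn(move || {
        // Publish a handle to ourselves, then block on the token.
        tx.send(thread::current()).unwrap();
        thread::park();
    });
    // The token saturates at one: an `unpark` that races ahead of
    // `park` is buffered rather than lost.
    rx.recv().unwrap().unpark();
    waiter.join().ok().unwrap();
}
```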
+ + #[test] + fn test_unnamed_thread() { + thread::spawn(move|| { + assert!(thread::current().name().is_none()); + }).join().ok().unwrap(); + } + + #[test] + fn test_named_thread() { + Builder::new().name("ada lovelace".to_string()).scoped(move|| { + assert!(thread::current().name().unwrap() == "ada lovelace".to_string()); + }).unwrap().join(); + } + + #[test] + fn test_run_basic() { + let (tx, rx) = channel(); + thread::spawn(move|| { + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + } + + #[test] + fn test_join_success() { + assert!(thread::scoped(move|| -> String { + "Success!".to_string() + }).join() == "Success!"); + } + + #[test] + fn test_join_panic() { + match thread::spawn(move|| { + panic!() + }).join() { + result::Result::Err(_) => (), + result::Result::Ok(()) => panic!() + } + } + + #[test] + fn test_scoped_success() { + let res = thread::scoped(move|| -> String { + "Success!".to_string() + }).join(); + assert!(res == "Success!"); + } + + #[test] + #[should_fail] + fn test_scoped_panic() { + thread::scoped(|| panic!()).join(); + } + + #[test] + #[should_fail] + fn test_scoped_implicit_panic() { + let _ = thread::scoped(|| panic!()); + } + + #[test] + fn test_spawn_sched() { + use clone::Clone; + + let (tx, rx) = channel(); + + fn f(i: i32, tx: Sender<()>) { + let tx = tx.clone(); + thread::spawn(move|| { + if i == 0 { + tx.send(()).unwrap(); + } else { + f(i - 1, tx); + } + }); + + } + f(10, tx); + rx.recv().unwrap(); + } + + #[test] + fn test_spawn_sched_childs_on_default_sched() { + let (tx, rx) = channel(); + + thread::spawn(move|| { + thread::spawn(move|| { + tx.send(()).unwrap(); + }); + }); + + rx.recv().unwrap(); + } + + fn avoid_copying_the_body<F>(spawnfn: F) where F: FnOnce(Thunk<'static>) { + let (tx, rx) = channel(); + + let x: Box<_> = box 1; + let x_in_parent = (&*x) as *const i32 as usize; + + spawnfn(Thunk::new(move|| { + let x_in_child = (&*x) as *const i32 as usize; + tx.send(x_in_child).unwrap(); + })); + + let x_in_child = rx.recv().unwrap(); + assert_eq!(x_in_parent, x_in_child); + } + + #[test] + fn test_avoid_copying_the_body_spawn() { + avoid_copying_the_body(|v| { + thread::spawn(move || v.invoke(())); + }); + } + + #[test] + fn test_avoid_copying_the_body_thread_spawn() { + avoid_copying_the_body(|f| { + thread::spawn(move|| { + f.invoke(()); + }); + }) + } + + #[test] + fn test_avoid_copying_the_body_join() { + avoid_copying_the_body(|f| { + let _ = thread::spawn(move|| { + f.invoke(()) + }).join(); + }) + } + + #[test] + fn test_child_doesnt_ref_parent() { + // If the child refcounts the parent task, this will stack overflow when + // climbing the task tree to dereference each ancestor. (See #1789) + // (well, it would if the constant were 8000+ - I lowered it to be more + // valgrind-friendly. try this at home, instead..!) 
+ const GENERATIONS: u32 = 16; + fn child_no(x: u32) -> Thunk<'static> { + return Thunk::new(move|| { + if x < GENERATIONS { + thread::spawn(move|| child_no(x+1).invoke(())); + } + }); + } + thread::spawn(|| child_no(0).invoke(())); + } + + #[test] + fn test_simple_newsched_spawn() { + thread::spawn(move || {}); + } + + #[test] + fn test_try_panic_message_static_str() { + match thread::spawn(move|| { + panic!("static string"); + }).join() { + Err(e) => { + type T = &'static str; + assert!(e.is::<T>()); + assert_eq!(*e.downcast::<T>().unwrap(), "static string"); + } + Ok(()) => panic!() + } + } + + #[test] + fn test_try_panic_message_owned_str() { + match thread::spawn(move|| { + panic!("owned string".to_string()); + }).join() { + Err(e) => { + type T = String; + assert!(e.is::<T>()); + assert_eq!(*e.downcast::<T>().unwrap(), "owned string".to_string()); + } + Ok(()) => panic!() + } + } + + #[test] + fn test_try_panic_message_any() { + match thread::spawn(move|| { + panic!(box 413u16 as Box<Any + Send>); + }).join() { + Err(e) => { + type T = Box<Any + Send>; + assert!(e.is::<T>()); + let any = e.downcast::<T>().unwrap(); + assert!(any.is::<u16>()); + assert_eq!(*any.downcast::<u16>().unwrap(), 413); + } + Ok(()) => panic!() + } + } + + #[test] + fn test_try_panic_message_unit_struct() { + struct Juju; + + match thread::spawn(move|| { + panic!(Juju) + }).join() { + Err(ref e) if e.is::<Juju>() => {} + Err(_) | Ok(()) => panic!() + } + } + + #[test] + fn test_park_timeout_unpark_before() { + for _ in 0..10 { + thread::current().unpark(); + thread::park_timeout(Duration::seconds(10_000_000)); + } + } + + #[test] + fn test_park_timeout_unpark_not_called() { + for _ in 0..10 { + thread::park_timeout(Duration::milliseconds(10)); + } + } + + #[test] + fn test_park_timeout_unpark_called_other_thread() { + use std::old_io; + + for _ in 0..10 { + let th = thread::current(); + + let _guard = thread::spawn(move || { + old_io::timer::sleep(Duration::milliseconds(50)); + th.unpark(); + }); + + thread::park_timeout(Duration::seconds(10_000_000)); + } + } + + #[test] + fn sleep_smoke() { + thread::sleep(Duration::milliseconds(2)); + thread::sleep(Duration::milliseconds(-2)); + } + + // NOTE: the corresponding test for stderr is in run-pass/task-stderr, due + // to the test harness apparently interfering with stderr configuration. +} diff --git a/src/libstd/thread/scoped.rs b/src/libstd/thread/scoped.rs new file mode 100644 index 00000000000..2a8be2ad82c --- /dev/null +++ b/src/libstd/thread/scoped.rs @@ -0,0 +1,317 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Scoped thread-local storage +//! +//! This module provides the ability to generate *scoped* thread-local +//! variables. In this sense, scoped indicates that thread local storage +//! actually stores a reference to a value, and this reference is only placed +//! in storage for a scoped amount of time. +//! +//! There are no restrictions on what types can be placed into a scoped +//! variable, but all scoped variables are initialized to the equivalent of +//! null. 
Scoped thread local storage is useful when a value is present for a known +//! period of time and it is not required to relinquish ownership of the +//! contents. +//! +//! # Examples +//! +//! ``` +//! scoped_thread_local!(static FOO: u32); +//! +//! // Initially each scoped slot is empty. +//! assert!(!FOO.is_set()); +//! +//! // When inserting a value, the value is only in place for the duration +//! // of the closure specified. +//! FOO.set(&1, || { +//! FOO.with(|slot| { +//! assert_eq!(*slot, 1); +//! }); +//! }); +//! ``` + +#![unstable(feature = "thread_local_internals")] + +use prelude::v1::*; + +// macro hygiene sure would be nice, wouldn't it? +#[doc(hidden)] +pub mod __impl { + pub use super::imp::KeyInner; + pub use sys_common::thread_local::INIT as OS_INIT; +} + +/// Type representing a thread local storage key corresponding to a reference +/// to the type parameter `T`. +/// +/// Keys are statically allocated and can contain a reference to an instance of +/// type `T` scoped to a particular lifetime. Keys provides two methods, `set` +/// and `with`, both of which currently use closures to control the scope of +/// their contents. +#[unstable(feature = "scoped_tls", + reason = "scoped TLS has yet to have wide enough use to fully consider \ + stabilizing its interface")] +pub struct ScopedKey<T> { #[doc(hidden)] pub inner: __impl::KeyInner<T> } + +/// Declare a new scoped thread local storage key. +/// +/// This macro declares a `static` item on which methods are used to get and +/// set the value stored within. +#[macro_export] +#[allow_internal_unstable] +macro_rules! scoped_thread_local { + (static $name:ident: $t:ty) => ( + __scoped_thread_local_inner!(static $name: $t); + ); + (pub static $name:ident: $t:ty) => ( + __scoped_thread_local_inner!(pub static $name: $t); + ); +} + +#[macro_export] +#[doc(hidden)] +#[allow_internal_unstable] +macro_rules! __scoped_thread_local_inner { + (static $name:ident: $t:ty) => ( + #[cfg_attr(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")), + thread_local)] + static $name: ::std::thread::ScopedKey<$t> = + __scoped_thread_local_inner!($t); + ); + (pub static $name:ident: $t:ty) => ( + #[cfg_attr(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")), + thread_local)] + pub static $name: ::std::thread::ScopedKey<$t> = + __scoped_thread_local_inner!($t); + ); + ($t:ty) => ({ + use std::thread::ScopedKey as __Key; + + #[cfg(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")))] + const _INIT: __Key<$t> = __Key { + inner: ::std::thread::__scoped::__impl::KeyInner { + inner: ::std::cell::UnsafeCell { value: 0 as *mut _ }, + } + }; + + #[cfg(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64"))] + const _INIT: __Key<$t> = __Key { + inner: ::std::thread::__scoped::__impl::KeyInner { + inner: ::std::thread::__scoped::__impl::OS_INIT, + marker: ::std::marker::PhantomData::<::std::cell::Cell<$t>>, + } + }; + + _INIT + }) +} + +#[unstable(feature = "scoped_tls", + reason = "scoped TLS has yet to have wide enough use to fully consider \ + stabilizing its interface")] +impl<T> ScopedKey<T> { + /// Insert a value into this scoped thread local storage slot for a + /// duration of a closure. 
+ /// + /// While `cb` is running, the value `t` will be returned by `get` unless + /// this function is called recursively inside of `cb`. + /// + /// Upon return, this function will restore the previous value, if any + /// was available. + /// + /// # Examples + /// + /// ``` + /// scoped_thread_local!(static FOO: u32); + /// + /// FOO.set(&100, || { + /// let val = FOO.with(|v| *v); + /// assert_eq!(val, 100); + /// + /// // set can be called recursively + /// FOO.set(&101, || { + /// // ... + /// }); + /// + /// // Recursive calls restore the previous value. + /// let val = FOO.with(|v| *v); + /// assert_eq!(val, 100); + /// }); + /// ``` + pub fn set<R, F>(&'static self, t: &T, cb: F) -> R where + F: FnOnce() -> R, + { + struct Reset<'a, T: 'a> { + key: &'a __impl::KeyInner<T>, + val: *mut T, + } + #[unsafe_destructor] + impl<'a, T> Drop for Reset<'a, T> { + fn drop(&mut self) { + unsafe { self.key.set(self.val) } + } + } + + let prev = unsafe { + let prev = self.inner.get(); + self.inner.set(t as *const T as *mut T); + prev + }; + + let _reset = Reset { key: &self.inner, val: prev }; + cb() + } + + /// Get a value out of this scoped variable. + /// + /// This function takes a closure which receives the value of this + /// variable. + /// + /// # Panics + /// + /// This function will panic if `set` has not previously been called. + /// + /// # Examples + /// + /// ```no_run + /// scoped_thread_local!(static FOO: u32); + /// + /// FOO.with(|slot| { + /// // work with `slot` + /// }); + /// ``` + pub fn with<R, F>(&'static self, cb: F) -> R where + F: FnOnce(&T) -> R + { + unsafe { + let ptr = self.inner.get(); + assert!(!ptr.is_null(), "cannot access a scoped thread local \ + variable without calling `set` first"); + cb(&*ptr) + } + } + + /// Test whether this TLS key has been `set` for the current thread. 
+ pub fn is_set(&'static self) -> bool { + unsafe { !self.inner.get().is_null() } + } +} + +#[cfg(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")))] +mod imp { + use std::cell::UnsafeCell; + + #[doc(hidden)] + pub struct KeyInner<T> { pub inner: UnsafeCell<*mut T> } + + unsafe impl<T> ::marker::Sync for KeyInner<T> { } + + #[doc(hidden)] + impl<T> KeyInner<T> { + #[doc(hidden)] + pub unsafe fn set(&self, ptr: *mut T) { *self.inner.get() = ptr; } + #[doc(hidden)] + pub unsafe fn get(&self) -> *mut T { *self.inner.get() } + } +} + +#[cfg(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64"))] +mod imp { + use marker; + use std::cell::Cell; + use sys_common::thread_local::StaticKey as OsStaticKey; + + #[doc(hidden)] + pub struct KeyInner<T> { + pub inner: OsStaticKey, + pub marker: marker::PhantomData<Cell<T>>, + } + + unsafe impl<T> ::marker::Sync for KeyInner<T> { } + + #[doc(hidden)] + impl<T> KeyInner<T> { + #[doc(hidden)] + pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) } + #[doc(hidden)] + pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ } + } +} + + +#[cfg(test)] +mod tests { + use cell::Cell; + use prelude::v1::*; + + scoped_thread_local!(static FOO: u32); + + #[test] + fn smoke() { + scoped_thread_local!(static BAR: u32); + + assert!(!BAR.is_set()); + BAR.set(&1, || { + assert!(BAR.is_set()); + BAR.with(|slot| { + assert_eq!(*slot, 1); + }); + }); + assert!(!BAR.is_set()); + } + + #[test] + fn cell_allowed() { + scoped_thread_local!(static BAR: Cell<u32>); + + BAR.set(&Cell::new(1), || { + BAR.with(|slot| { + assert_eq!(slot.get(), 1); + }); + }); + } + + #[test] + fn scope_item_allowed() { + assert!(!FOO.is_set()); + FOO.set(&1, || { + assert!(FOO.is_set()); + FOO.with(|slot| { + assert_eq!(*slot, 1); + }); + }); + assert!(!FOO.is_set()); + } +} |
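Finally, the scoped-TLS half in use, adapted from the module docs in `scoped.rs` (its interface is feature-gated as `scoped_tls` in this commit):

```rust
#![feature(scoped_tls)] // unstable gate at the time of this commit

scoped_thread_local!(static FOO: u32);

fn main() {
    assert!(!FOO.is_set());
    FOO.set(&42, || {
        // The reference is visible only for the duration of the closure.
        FOO.with(|v| assert_eq!(*v, 42));
        // `set` nests; the previous value is restored on return.
        FOO.set(&7, || FOO.with(|v| assert_eq!(*v, 7)));
        FOO.with(|v| assert_eq!(*v, 42));
    });
    assert!(!FOO.is_set());
}
```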