From 6bd3ab0d8140053475a901ad4e2e80e98955bcb0 Mon Sep 17 00:00:00 2001 From: Aaron Turon Date: Fri, 20 Mar 2015 00:46:13 -0700 Subject: Implement RFC 909: move thread_local into thread This commit implements [RFC 909](https://github.com/rust-lang/rfcs/pull/909): The `std::thread_local` module is now deprecated, and its contents are available directly in `std::thread` as `LocalKey`, `LocalKeyState`, and `ScopedKey`. The macros remain exactly as they were, which means little if any code should break. Nevertheless, this is technically a: [breaking-change] Closes #23547 --- src/libstd/thread/local.rs | 735 +++++++++++++++++++++++++++++++ src/libstd/thread/mod.rs | 1026 +++++++++++++++++++++++++++++++++++++++++++ src/libstd/thread/scoped.rs | 317 +++++++++++++ 3 files changed, 2078 insertions(+) create mode 100644 src/libstd/thread/local.rs create mode 100644 src/libstd/thread/mod.rs create mode 100644 src/libstd/thread/scoped.rs (limited to 'src/libstd/thread') diff --git a/src/libstd/thread/local.rs b/src/libstd/thread/local.rs new file mode 100644 index 00000000000..43142d2e5bc --- /dev/null +++ b/src/libstd/thread/local.rs @@ -0,0 +1,735 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Thread local storage + +#![unstable(feature = "thread_local_internals")] + +use prelude::v1::*; + +use cell::UnsafeCell; + +// Sure wish we had macro hygiene, no? 
+#[doc(hidden)] +#[unstable(feature = "thread_local_internals")] +pub mod __impl { + pub use super::imp::Key as KeyInner; + pub use super::imp::destroy_value; + pub use sys_common::thread_local::INIT_INNER as OS_INIT_INNER; + pub use sys_common::thread_local::StaticKey as OsStaticKey; +} + +/// A thread local storage key which owns its contents. +/// +/// This key uses the fastest possible implementation available to it for the +/// target platform. It is instantiated with the `thread_local!` macro and the +/// primary method is the `with` method. +/// +/// The `with` method yields a reference to the contained value which cannot be +/// sent across tasks or escape the given closure. +/// +/// # Initialization and Destruction +/// +/// Initialization is dynamically performed on the first call to `with()` +/// within a thread, and values support destructors which will be run when a +/// thread exits. +/// +/// # Examples +/// +/// ``` +/// use std::cell::RefCell; +/// use std::thread; +/// +/// thread_local!(static FOO: RefCell = RefCell::new(1)); +/// +/// FOO.with(|f| { +/// assert_eq!(*f.borrow(), 1); +/// *f.borrow_mut() = 2; +/// }); +/// +/// // each thread starts out with the initial value of 1 +/// thread::spawn(move|| { +/// FOO.with(|f| { +/// assert_eq!(*f.borrow(), 1); +/// *f.borrow_mut() = 3; +/// }); +/// }); +/// +/// // we retain our original value of 2 despite the child thread +/// FOO.with(|f| { +/// assert_eq!(*f.borrow(), 2); +/// }); +/// ``` +#[stable(feature = "rust1", since = "1.0.0")] +pub struct LocalKey { + // The key itself may be tagged with #[thread_local], and this `Key` is + // stored as a `static`, and it's not valid for a static to reference the + // address of another thread_local static. For this reason we kinda wonkily + // work around this by generating a shim function which will give us the + // address of the inner TLS key at runtime. 
+ // + // This is trivially devirtualizable by LLVM because we never store anything + // to this field and rustc can declare the `static` as constant as well. + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub inner: fn() -> &'static __impl::KeyInner>>, + + // initialization routine to invoke to create a value + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub init: fn() -> T, +} + +/// Declare a new thread local storage key of type `std::thread::LocalKey`. +#[macro_export] +#[stable(feature = "rust1", since = "1.0.0")] +#[allow_internal_unstable] +macro_rules! thread_local { + (static $name:ident: $t:ty = $init:expr) => ( + static $name: ::std::thread::LocalKey<$t> = { + use std::cell::UnsafeCell as __UnsafeCell; + use std::thread::__local::__impl::KeyInner as __KeyInner; + use std::option::Option as __Option; + use std::option::Option::None as __None; + + __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = { + __UnsafeCell { value: __None } + }); + fn __init() -> $t { $init } + fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> { + &__KEY + } + ::std::thread::LocalKey { inner: __getit, init: __init } + }; + ); + (pub static $name:ident: $t:ty = $init:expr) => ( + pub static $name: ::std::thread::LocalKey<$t> = { + use std::cell::UnsafeCell as __UnsafeCell; + use std::thread::__local::__impl::KeyInner as __KeyInner; + use std::option::Option as __Option; + use std::option::Option::None as __None; + + __thread_local_inner!(static __KEY: __UnsafeCell<__Option<$t>> = { + __UnsafeCell { value: __None } + }); + fn __init() -> $t { $init } + fn __getit() -> &'static __KeyInner<__UnsafeCell<__Option<$t>>> { + &__KEY + } + ::std::thread::LocalKey { inner: __getit, init: __init } + }; + ); +} + +// Macro pain #4586: +// +// When cross compiling, rustc will load plugins and macros from the *host* +// platform before search for macros from the target platform. 
This is primarily +// done to detect, for example, plugins. Ideally the macro below would be +// defined once per module below, but unfortunately this means we have the +// following situation: +// +// 1. We compile libstd for x86_64-unknown-linux-gnu, this thread_local!() macro +// will inject #[thread_local] statics. +// 2. We then try to compile a program for arm-linux-androideabi +// 3. The compiler has a host of linux and a target of android, so it loads +// macros from the *linux* libstd. +// 4. The macro generates a #[thread_local] field, but the android libstd does +// not use #[thread_local] +// 5. Compile error about structs with wrong fields. +// +// To get around this, we're forced to inject the #[cfg] logic into the macro +// itself. Woohoo. + +#[macro_export] +#[doc(hidden)] +#[allow_internal_unstable] +macro_rules! __thread_local_inner { + (static $name:ident: $t:ty = $init:expr) => ( + #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), + not(target_arch = "aarch64")), + thread_local)] + static $name: ::std::thread::__local::__impl::KeyInner<$t> = + __thread_local_inner!($init, $t); + ); + (pub static $name:ident: $t:ty = $init:expr) => ( + #[cfg_attr(all(any(target_os = "macos", target_os = "linux"), + not(target_arch = "aarch64")), + thread_local)] + pub static $name: ::std::thread::__local::__impl::KeyInner<$t> = + __thread_local_inner!($init, $t); + ); + ($init:expr, $t:ty) => ({ + #[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))] + const _INIT: ::std::thread::__local::__impl::KeyInner<$t> = { + ::std::thread::__local::__impl::KeyInner { + inner: ::std::cell::UnsafeCell { value: $init }, + dtor_registered: ::std::cell::UnsafeCell { value: false }, + dtor_running: ::std::cell::UnsafeCell { value: false }, + } + }; + + #[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))] + const _INIT: ::std::thread::__local::__impl::KeyInner<$t> = { + unsafe extern fn 
__destroy(ptr: *mut u8) { + ::std::thread::__local::__impl::destroy_value::<$t>(ptr); + } + + ::std::thread::__local::__impl::KeyInner { + inner: ::std::cell::UnsafeCell { value: $init }, + os: ::std::thread::__local::__impl::OsStaticKey { + inner: ::std::thread::__local::__impl::OS_INIT_INNER, + dtor: ::std::option::Option::Some(__destroy as unsafe extern fn(*mut u8)), + }, + } + }; + + _INIT + }); +} + +/// Indicator of the state of a thread local storage key. +#[unstable(feature = "std_misc", + reason = "state querying was recently added")] +#[derive(Eq, PartialEq, Copy)] +pub enum LocalKeyState { + /// All keys are in this state whenever a thread starts. Keys will + /// transition to the `Valid` state once the first call to `with` happens + /// and the initialization expression succeeds. + /// + /// Keys in the `Uninitialized` state will yield a reference to the closure + /// passed to `with` so long as the initialization routine does not panic. + Uninitialized, + + /// Once a key has been accessed successfully, it will enter the `Valid` + /// state. Keys in the `Valid` state will remain so until the thread exits, + /// at which point the destructor will be run and the key will enter the + /// `Destroyed` state. + /// + /// Keys in the `Valid` state will be guaranteed to yield a reference to the + /// closure passed to `with`. + Valid, + + /// When a thread exits, the destructors for keys will be run (if + /// necessary). While a destructor is running, and possibly after a + /// destructor has run, a key is in the `Destroyed` state. + /// + /// Keys in the `Destroyed` states will trigger a panic when accessed via + /// `with`. + Destroyed, +} + +impl LocalKey { + /// Acquire a reference to the value in this TLS key. + /// + /// This will lazily initialize the value if this thread has not referenced + /// this key yet. 
+ /// + /// # Panics + /// + /// This function will `panic!()` if the key currently has its + /// destructor running, and it **may** panic if the destructor has + /// previously been run for this thread. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn with(&'static self, f: F) -> R + where F: FnOnce(&T) -> R { + let slot = (self.inner)(); + unsafe { + let slot = slot.get().expect("cannot access a TLS value during or \ + after it is destroyed"); + f(match *slot.get() { + Some(ref inner) => inner, + None => self.init(slot), + }) + } + } + + unsafe fn init(&self, slot: &UnsafeCell>) -> &T { + // Execute the initialization up front, *then* move it into our slot, + // just in case initialization fails. + let value = (self.init)(); + let ptr = slot.get(); + *ptr = Some(value); + (*ptr).as_ref().unwrap() + } + + /// Query the current state of this key. + /// + /// A key is initially in the `Uninitialized` state whenever a thread + /// starts. It will remain in this state up until the first call to `with` + /// within a thread has run the initialization expression successfully. + /// + /// Once the initialization expression succeeds, the key transitions to the + /// `Valid` state which will guarantee that future calls to `with` will + /// succeed within the thread. + /// + /// When a thread exits, each key will be destroyed in turn, and as keys are + /// destroyed they will enter the `Destroyed` state just before the + /// destructor starts to run. Keys may remain in the `Destroyed` state after + /// destruction has completed. Keys without destructors (e.g. with types + /// that are `Copy`), may never enter the `Destroyed` state. + /// + /// Keys in the `Uninitialized` can be accessed so long as the + /// initialization does not panic. Keys in the `Valid` state are guaranteed + /// to be able to be accessed. Keys in the `Destroyed` state will panic on + /// any call to `with`. 
+ #[unstable(feature = "std_misc", + reason = "state querying was recently added")] + pub fn state(&'static self) -> LocalKeyState { + unsafe { + match (self.inner)().get() { + Some(cell) => { + match *cell.get() { + Some(..) => LocalKeyState::Valid, + None => LocalKeyState::Uninitialized, + } + } + None => LocalKeyState::Destroyed, + } + } + } + + /// Deprecated + #[unstable(feature = "std_misc")] + #[deprecated(since = "1.0.0", + reason = "function renamed to state() and returns more info")] + pub fn destroyed(&'static self) -> bool { self.state() == LocalKeyState::Destroyed } +} + +#[cfg(all(any(target_os = "macos", target_os = "linux"), not(target_arch = "aarch64")))] +mod imp { + use prelude::v1::*; + + use cell::UnsafeCell; + use intrinsics; + use ptr; + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub struct Key { + // Place the inner bits in an `UnsafeCell` to currently get around the + // "only Sync statics" restriction. This allows any type to be placed in + // the cell. + // + // Note that all access requires `T: 'static` so it can't be a type with + // any borrowed pointers still. + #[unstable(feature = "thread_local_internals")] + pub inner: UnsafeCell, + + // Metadata to keep track of the state of the destructor. Remember that + // these variables are thread-local, not global. 
+ #[unstable(feature = "thread_local_internals")] + pub dtor_registered: UnsafeCell, // should be Cell + #[unstable(feature = "thread_local_internals")] + pub dtor_running: UnsafeCell, // should be Cell + } + + unsafe impl ::marker::Sync for Key { } + + #[doc(hidden)] + impl Key { + pub unsafe fn get(&'static self) -> Option<&'static T> { + if intrinsics::needs_drop::() && *self.dtor_running.get() { + return None + } + self.register_dtor(); + Some(&*self.inner.get()) + } + + unsafe fn register_dtor(&self) { + if !intrinsics::needs_drop::() || *self.dtor_registered.get() { + return + } + + register_dtor(self as *const _ as *mut u8, + destroy_value::); + *self.dtor_registered.get() = true; + } + } + + // Since what appears to be glibc 2.18 this symbol has been shipped which + // GCC and clang both use to invoke destructors in thread_local globals, so + // let's do the same! + // + // Note, however, that we run on lots older linuxes, as well as cross + // compiling from a newer linux to an older linux, so we also have a + // fallback implementation to use as well. + // + // Due to rust-lang/rust#18804, make sure this is not generic! + #[cfg(target_os = "linux")] + unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) { + use boxed; + use mem; + use libc; + use sys_common::thread_local as os; + + extern { + static __dso_handle: *mut u8; + #[linkage = "extern_weak"] + static __cxa_thread_atexit_impl: *const (); + } + if !__cxa_thread_atexit_impl.is_null() { + type F = unsafe extern fn(dtor: unsafe extern fn(*mut u8), + arg: *mut u8, + dso_handle: *mut u8) -> libc::c_int; + mem::transmute::<*const (), F>(__cxa_thread_atexit_impl) + (dtor, t, __dso_handle); + return + } + + // The fallback implementation uses a vanilla OS-based TLS key to track + // the list of destructors that need to be run for this thread. The key + // then has its own destructor which runs all the other destructors. 
+ // + // The destructor for DTORS is a little special in that it has a `while` + // loop to continuously drain the list of registered destructors. It + // *should* be the case that this loop always terminates because we + // provide the guarantee that a TLS key cannot be set after it is + // flagged for destruction. + static DTORS: os::StaticKey = os::StaticKey { + inner: os::INIT_INNER, + dtor: Some(run_dtors as unsafe extern "C" fn(*mut u8)), + }; + type List = Vec<(*mut u8, unsafe extern fn(*mut u8))>; + if DTORS.get().is_null() { + let v: Box = box Vec::new(); + DTORS.set(boxed::into_raw(v) as *mut u8); + } + let list: &mut List = &mut *(DTORS.get() as *mut List); + list.push((t, dtor)); + + unsafe extern fn run_dtors(mut ptr: *mut u8) { + while !ptr.is_null() { + let list: Box = Box::from_raw(ptr as *mut List); + for &(ptr, dtor) in &*list { + dtor(ptr); + } + ptr = DTORS.get(); + DTORS.set(ptr::null_mut()); + } + } + } + + // OSX's analog of the above linux function is this _tlv_atexit function. + // The disassembly of thread_local globals in C++ (at least produced by + // clang) will have this show up in the output. + #[cfg(target_os = "macos")] + unsafe fn register_dtor(t: *mut u8, dtor: unsafe extern fn(*mut u8)) { + extern { + fn _tlv_atexit(dtor: unsafe extern fn(*mut u8), + arg: *mut u8); + } + _tlv_atexit(dtor, t); + } + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub unsafe extern fn destroy_value(ptr: *mut u8) { + let ptr = ptr as *mut Key; + // Right before we run the user destructor be sure to flag the + // destructor as running for this thread so calls to `get` will return + // `None`. 
+ *(*ptr).dtor_running.get() = true; + ptr::read((*ptr).inner.get()); + } +} + +#[cfg(any(not(any(target_os = "macos", target_os = "linux")), target_arch = "aarch64"))] +mod imp { + use prelude::v1::*; + + use alloc::boxed; + use cell::UnsafeCell; + use mem; + use ptr; + use sys_common::thread_local::StaticKey as OsStaticKey; + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub struct Key { + // Statically allocated initialization expression, using an `UnsafeCell` + // for the same reasons as above. + #[unstable(feature = "thread_local_internals")] + pub inner: UnsafeCell, + + // OS-TLS key that we'll use to key off. + #[unstable(feature = "thread_local_internals")] + pub os: OsStaticKey, + } + + unsafe impl ::marker::Sync for Key { } + + struct Value { + key: &'static Key, + value: T, + } + + #[doc(hidden)] + impl Key { + pub unsafe fn get(&'static self) -> Option<&'static T> { + self.ptr().map(|p| &*p) + } + + unsafe fn ptr(&'static self) -> Option<*mut T> { + let ptr = self.os.get() as *mut Value; + if !ptr.is_null() { + if ptr as usize == 1 { + return None + } + return Some(&mut (*ptr).value as *mut T); + } + + // If the lookup returned null, we haven't initialized our own local + // copy, so do that now. + // + // Also note that this transmute_copy should be ok because the value + // `inner` is already validated to be a valid `static` value, so we + // should be able to freely copy the bits. + let ptr: Box> = box Value { + key: self, + value: mem::transmute_copy(&self.inner), + }; + let ptr: *mut Value = boxed::into_raw(ptr); + self.os.set(ptr as *mut u8); + Some(&mut (*ptr).value as *mut T) + } + } + + #[doc(hidden)] + #[unstable(feature = "thread_local_internals")] + pub unsafe extern fn destroy_value(ptr: *mut u8) { + // The OS TLS ensures that this key contains a NULL value when this + // destructor starts to run. 
We set it back to a sentinel value of 1 to + // ensure that any future calls to `get` for this thread will return + // `None`. + // + // Note that to prevent an infinite loop we reset it back to null right + // before we return from the destructor ourselves. + let ptr: Box> = Box::from_raw(ptr as *mut Value); + let key = ptr.key; + key.os.set(1 as *mut u8); + drop(ptr); + key.os.set(ptr::null_mut()); + } +} + +#[cfg(test)] +mod tests { + use prelude::v1::*; + + use sync::mpsc::{channel, Sender}; + use cell::UnsafeCell; + use super::LocalKeyState; + use thread; + + struct Foo(Sender<()>); + + impl Drop for Foo { + fn drop(&mut self) { + let Foo(ref s) = *self; + s.send(()).unwrap(); + } + } + + #[test] + fn smoke_no_dtor() { + thread_local!(static FOO: UnsafeCell = UnsafeCell { value: 1 }); + + FOO.with(|f| unsafe { + assert_eq!(*f.get(), 1); + *f.get() = 2; + }); + let (tx, rx) = channel(); + let _t = thread::spawn(move|| { + FOO.with(|f| unsafe { + assert_eq!(*f.get(), 1); + }); + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + + FOO.with(|f| unsafe { + assert_eq!(*f.get(), 2); + }); + } + + #[test] + fn states() { + struct Foo; + impl Drop for Foo { + fn drop(&mut self) { + assert!(FOO.state() == LocalKeyState::Destroyed); + } + } + fn foo() -> Foo { + assert!(FOO.state() == LocalKeyState::Uninitialized); + Foo + } + thread_local!(static FOO: Foo = foo()); + + thread::spawn(|| { + assert!(FOO.state() == LocalKeyState::Uninitialized); + FOO.with(|_| { + assert!(FOO.state() == LocalKeyState::Valid); + }); + assert!(FOO.state() == LocalKeyState::Valid); + }).join().ok().unwrap(); + } + + #[test] + fn smoke_dtor() { + thread_local!(static FOO: UnsafeCell> = UnsafeCell { + value: None + }); + + let (tx, rx) = channel(); + let _t = thread::spawn(move|| unsafe { + let mut tx = Some(tx); + FOO.with(|f| { + *f.get() = Some(Foo(tx.take().unwrap())); + }); + }); + rx.recv().unwrap(); + } + + #[test] + fn circular() { + struct S1; + struct S2; + thread_local!(static K1: 
UnsafeCell> = UnsafeCell { + value: None + }); + thread_local!(static K2: UnsafeCell> = UnsafeCell { + value: None + }); + static mut HITS: u32 = 0; + + impl Drop for S1 { + fn drop(&mut self) { + unsafe { + HITS += 1; + if K2.state() == LocalKeyState::Destroyed { + assert_eq!(HITS, 3); + } else { + if HITS == 1 { + K2.with(|s| *s.get() = Some(S2)); + } else { + assert_eq!(HITS, 3); + } + } + } + } + } + impl Drop for S2 { + fn drop(&mut self) { + unsafe { + HITS += 1; + assert!(K1.state() != LocalKeyState::Destroyed); + assert_eq!(HITS, 2); + K1.with(|s| *s.get() = Some(S1)); + } + } + } + + thread::spawn(move|| { + drop(S1); + }).join().ok().unwrap(); + } + + #[test] + fn self_referential() { + struct S1; + thread_local!(static K1: UnsafeCell> = UnsafeCell { + value: None + }); + + impl Drop for S1 { + fn drop(&mut self) { + assert!(K1.state() == LocalKeyState::Destroyed); + } + } + + thread::spawn(move|| unsafe { + K1.with(|s| *s.get() = Some(S1)); + }).join().ok().unwrap(); + } + + #[test] + fn dtors_in_dtors_in_dtors() { + struct S1(Sender<()>); + thread_local!(static K1: UnsafeCell> = UnsafeCell { + value: None + }); + thread_local!(static K2: UnsafeCell> = UnsafeCell { + value: None + }); + + impl Drop for S1 { + fn drop(&mut self) { + let S1(ref tx) = *self; + unsafe { + if K2.state() != LocalKeyState::Destroyed { + K2.with(|s| *s.get() = Some(Foo(tx.clone()))); + } + } + } + } + + let (tx, rx) = channel(); + let _t = thread::spawn(move|| unsafe { + let mut tx = Some(tx); + K1.with(|s| *s.get() = Some(S1(tx.take().unwrap()))); + }); + rx.recv().unwrap(); + } +} + +#[cfg(test)] +mod dynamic_tests { + use prelude::v1::*; + + use cell::RefCell; + use collections::HashMap; + + #[test] + fn smoke() { + fn square(i: i32) -> i32 { i * i } + thread_local!(static FOO: i32 = square(3)); + + FOO.with(|f| { + assert_eq!(*f, 9); + }); + } + + #[test] + fn hashmap() { + fn map() -> RefCell> { + let mut m = HashMap::new(); + m.insert(1, 2); + RefCell::new(m) + } + 
thread_local!(static FOO: RefCell> = map()); + + FOO.with(|map| { + assert_eq!(map.borrow()[1], 2); + }); + } + + #[test] + fn refcell_vec() { + thread_local!(static FOO: RefCell> = RefCell::new(vec![1, 2, 3])); + + FOO.with(|vec| { + assert_eq!(vec.borrow().len(), 3); + vec.borrow_mut().push(4); + assert_eq!(vec.borrow()[3], 4); + }); + } +} diff --git a/src/libstd/thread/mod.rs b/src/libstd/thread/mod.rs new file mode 100644 index 00000000000..57baeb1fb74 --- /dev/null +++ b/src/libstd/thread/mod.rs @@ -0,0 +1,1026 @@ +// Copyright 2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Native threads +//! +//! ## The threading model +//! +//! An executing Rust program consists of a collection of native OS threads, +//! each with their own stack and local state. +//! +//! Communication between threads can be done through +//! [channels](../../std/sync/mpsc/index.html), Rust's message-passing +//! types, along with [other forms of thread +//! synchronization](../../std/sync/index.html) and shared-memory data +//! structures. In particular, types that are guaranteed to be +//! threadsafe are easily shared between threads using the +//! atomically-reference-counted container, +//! [`Arc`](../../std/sync/struct.Arc.html). +//! +//! Fatal logic errors in Rust cause *thread panic*, during which +//! a thread will unwind the stack, running destructors and freeing +//! owned resources. Thread panic is unrecoverable from within +//! the panicking thread (i.e. there is no 'try/catch' in Rust), but +//! the panic may optionally be detected from a different thread. If +//! the main thread panics, the application will exit with a non-zero +//! exit code. +//! +//! 
When the main thread of a Rust program terminates, the entire program shuts +//! down, even if other threads are still running. However, this module provides +//! convenient facilities for automatically waiting for the termination of a +//! child thread (i.e., join). +//! +//! ## The `Thread` type +//! +//! Threads are represented via the `Thread` type, which you can +//! get in one of two ways: +//! +//! * By spawning a new thread, e.g. using the `thread::spawn` function. +//! * By requesting the current thread, using the `thread::current` function. +//! +//! Threads can be named, and provide some built-in support for low-level +//! synchronization (described below). +//! +//! The `thread::current()` function is available even for threads not spawned +//! by the APIs of this module. +//! +//! ## Spawning a thread +//! +//! A new thread can be spawned using the `thread::spawn` function: +//! +//! ```rust +//! use std::thread; +//! +//! thread::spawn(move || { +//! // some work here +//! }); +//! ``` +//! +//! In this example, the spawned thread is "detached" from the current +//! thread. This means that it can outlive its parent (the thread that spawned +//! it), unless this parent is the main thread. +//! +//! ## Scoped threads +//! +//! Often a parent thread uses a child thread to perform some particular task, +//! and at some point must wait for the child to complete before continuing. +//! For this scenario, use the `thread::scoped` function: +//! +//! ```rust +//! use std::thread; +//! +//! let guard = thread::scoped(move || { +//! // some work here +//! }); +//! +//! // do some other work in the meantime +//! let output = guard.join(); +//! ``` +//! +//! The `scoped` function doesn't return a `Thread` directly; instead, +//! it returns a *join guard*. The join guard is an RAII-style guard +//! that will automatically join the child thread (block until it +//! terminates) when it is dropped. You can join the child thread in +//! 
advance by calling the `join` method on the guard, which will also +//! return the result produced by the thread. A handle to the thread +//! itself is available via the `thread` method of the join guard. +//! +//! ## Configuring threads +//! +//! A new thread can be configured before it is spawned via the `Builder` type, +//! which currently allows you to set the name, stack size, and writers for +//! `println!` and `panic!` for the child thread: +//! +//! ```rust +//! use std::thread; +//! +//! thread::Builder::new().name("child1".to_string()).spawn(move || { +//! println!("Hello, world!"); +//! }); +//! ``` +//! +//! ## Blocking support: park and unpark +//! +//! Every thread is equipped with some basic low-level blocking support, via the +//! `park` and `unpark` functions. +//! +//! Conceptually, each `Thread` handle has an associated token, which is +//! initially not present: +//! +//! * The `thread::park()` function blocks the current thread unless or until +//! the token is available for its thread handle, at which point it atomically +//! consumes the token. It may also return *spuriously*, without consuming the +//! token. `thread::park_timeout()` does the same, but allows specifying a +//! maximum time to block the thread for. +//! +//! * The `unpark()` method on a `Thread` atomically makes the token available +//! if it wasn't already. +//! +//! In other words, each `Thread` acts a bit like a semaphore with initial count +//! 0, except that the semaphore is *saturating* (the count cannot go above 1), +//! and can return spuriously. +//! +//! The API is typically used by acquiring a handle to the current thread, +//! placing that handle in a shared data structure so that other threads can +//! find it, and then `park`ing. When some desired condition is met, another +//! thread calls `unpark` on the handle. +//! +//! The motivation for this design is twofold: +//! +//! * It avoids the need to allocate mutexes and condvars when building new +//! 
synchronization primitives; the threads already provide basic blocking/signaling. +//! +//! * It can be implemented very efficiently on many platforms. +//! +//! ## Thread-local storage +//! +//! This module also provides an implementation of thread local storage for Rust +//! programs. Thread local storage is a method of storing data into a global +//! variable which each thread in the program will have its own copy of. +//! Threads do not share this data, so accesses do not need to be synchronized. +//! +//! At a high level, this module provides two variants of storage: +//! +//! * Owned thread-local storage. This is a type of thread local key which +//! owns the value that it contains, and will destroy the value when the +//! thread exits. This variant is created with the `thread_local!` macro and +//! can contain any value which is `'static` (no borrowed pointers). +//! +//! * Scoped thread-local storage. This type of key is used to store a reference +//! to a value into local storage temporarily for the scope of a function +//! call. There are no restrictions on what types of values can be placed +//! into this key. +//! +//! Both forms of thread local storage provide an accessor function, `with`, +//! which will yield a shared reference to the value to the specified +//! closure. Thread-local keys only allow shared access to values as there is no +//! way to guarantee uniqueness if a mutable borrow was allowed. Most values +//! will want to make use of some form of **interior mutability** through the +//! `Cell` or `RefCell` types. 
+ +#![stable(feature = "rust1", since = "1.0.0")] + +#[stable(feature = "rust1", since = "1.0.0")] +pub use self::__local::{LocalKey, LocalKeyState}; + +#[unstable(feature = "scoped_tls", + reason = "scoped TLS has yet to have wide enough use to fully consider \ + stabilizing its interface")] +pub use self::__scoped::ScopedKey; + +use prelude::v1::*; + +use any::Any; +use cell::UnsafeCell; +use fmt; +use io; +use marker::PhantomData; +use rt::{self, unwind}; +use sync::{Mutex, Condvar, Arc}; +use sys::thread as imp; +use sys_common::{stack, thread_info}; +use thunk::Thunk; +use time::Duration; + +#[allow(deprecated)] use old_io::Writer; + +//////////////////////////////////////////////////////////////////////////////// +// Thread-local storage +//////////////////////////////////////////////////////////////////////////////// + +#[macro_use] +#[doc(hidden)] +#[path = "local.rs"] pub mod __local; + +#[macro_use] +#[doc(hidden)] +#[path = "scoped.rs"] pub mod __scoped; + +//////////////////////////////////////////////////////////////////////////////// +// Builder +//////////////////////////////////////////////////////////////////////////////// + +/// Thread configuration. Provides detailed control over the properties +/// and behavior of new threads. +#[stable(feature = "rust1", since = "1.0.0")] +pub struct Builder { + // A name for the thread-to-be, for identification in panic messages + name: Option, + // The size of the stack for the spawned thread + stack_size: Option, +} + +impl Builder { + /// Generate the base configuration for spawning a thread, from which + /// configuration methods can be chained. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn new() -> Builder { + Builder { + name: None, + stack_size: None, + } + } + + /// Name the thread-to-be. Currently the name is used for identification + /// only in panic messages. 
+ #[stable(feature = "rust1", since = "1.0.0")] + pub fn name(mut self, name: String) -> Builder { + self.name = Some(name); + self + } + + /// Set the size of the stack for the new thread. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn stack_size(mut self, size: usize) -> Builder { + self.stack_size = Some(size); + self + } + + /// Redirect thread-local stdout. + #[unstable(feature = "std_misc", + reason = "Will likely go away after proc removal")] + #[deprecated(since = "1.0.0", + reason = "the old I/O module is deprecated and this function \ + will be removed with no replacement")] + #[allow(deprecated)] + pub fn stdout(self, _stdout: Box) -> Builder { + self + } + + /// Redirect thread-local stderr. + #[unstable(feature = "std_misc", + reason = "Will likely go away after proc removal")] + #[deprecated(since = "1.0.0", + reason = "the old I/O module is deprecated and this function \ + will be removed with no replacement")] + #[allow(deprecated)] + pub fn stderr(self, _stderr: Box) -> Builder { + self + } + + /// Spawn a new thread, and return a join handle for it. + /// + /// The child thread may outlive the parent (unless the parent thread + /// is the main thread; the whole process is terminated when the main + /// thread finishes.) The join handle can be used to block on + /// termination of the child thread, including recovering its panics. + /// + /// # Errors + /// + /// Unlike the `spawn` free function, this method yields an + /// `io::Result` to capture any failure to create the thread at + /// the OS level. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn spawn(self, f: F) -> io::Result where + F: FnOnce(), F: Send + 'static + { + self.spawn_inner(Thunk::new(f)).map(|i| JoinHandle(i)) + } + + /// Spawn a new child thread that must be joined within a given + /// scope, and return a `JoinGuard`. 
+ /// + /// The join guard can be used to explicitly join the child thread (via + /// `join`), returning `Result`, or it will implicitly join the child + /// upon being dropped. Because the child thread may refer to data on the + /// current thread's stack (hence the "scoped" name), it cannot be detached; + /// it *must* be joined before the relevant stack frame is popped. See the + /// module documentation for additional details. + /// + /// # Errors + /// + /// Unlike the `scoped` free function, this method yields an + /// `io::Result` to capture any failure to create the thread at + /// the OS level. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn scoped<'a, T, F>(self, f: F) -> io::Result> where + T: Send + 'a, F: FnOnce() -> T, F: Send + 'a + { + self.spawn_inner(Thunk::new(f)).map(|inner| { + JoinGuard { inner: inner, _marker: PhantomData } + }) + } + + fn spawn_inner(self, f: Thunk<(), T>) -> io::Result> { + let Builder { name, stack_size } = self; + + let stack_size = stack_size.unwrap_or(rt::min_stack()); + + let my_thread = Thread::new(name); + let their_thread = my_thread.clone(); + + let my_packet = Packet(Arc::new(UnsafeCell::new(None))); + let their_packet = Packet(my_packet.0.clone()); + + // Spawning a new OS thread guarantees that __morestack will never get + // triggered, but we must manually set up the actual stack bounds once + // this function starts executing. This raises the lower limit by a bit + // because by the time that this function is executing we've already + // consumed at least a little bit of stack (we don't know the exact byte + // address at which our stack started). 
+ let main = move || { + let something_around_the_top_of_the_stack = 1; + let addr = &something_around_the_top_of_the_stack as *const i32; + let my_stack_top = addr as usize; + let my_stack_bottom = my_stack_top - stack_size + 1024; + unsafe { + if let Some(name) = their_thread.name() { + imp::set_name(name); + } + stack::record_os_managed_stack_bounds(my_stack_bottom, + my_stack_top); + thread_info::set(imp::guard::current(), their_thread); + } + + let mut output = None; + let try_result = { + let ptr = &mut output; + + // There are two primary reasons that general try/catch is + // unsafe. The first is that we do not support nested + // try/catch. The fact that this is happening in a newly-spawned + // thread suffices. The second is that unwinding while unwinding + // is not defined. We take care of that by having an + // 'unwinding' flag in the thread itself. For these reasons, + // this unsafety should be ok. + unsafe { + unwind::try(move || *ptr = Some(f.invoke(()))) + } + }; + unsafe { + *their_packet.0.get() = Some(match (output, try_result) { + (Some(data), Ok(_)) => Ok(data), + (None, Err(cause)) => Err(cause), + _ => unreachable!() + }); + } + }; + + Ok(JoinInner { + native: try!(unsafe { imp::create(stack_size, Thunk::new(main)) }), + thread: my_thread, + packet: my_packet, + joined: false, + }) + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Free functions +//////////////////////////////////////////////////////////////////////////////// + +/// Spawn a new thread, returning a `JoinHandle` for it. +/// +/// The join handle will implicitly *detach* the child thread upon being +/// dropped. In this case, the child thread may outlive the parent (unless +/// the parent thread is the main thread; the whole process is terminated when +/// the main thread finishes.) Additionally, the join handle provides a `join` +/// method that can be used to join the child thread. 
 If the child thread +/// panics, `join` will return an `Err` containing the argument given to +/// `panic`. +/// +/// # Panics +/// +/// Panics if the OS fails to create a thread; use `Builder::spawn` +/// to recover from such errors. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn spawn(f: F) -> JoinHandle where F: FnOnce(), F: Send + 'static { + Builder::new().spawn(f).unwrap() +} + +/// Spawn a new *scoped* thread, returning a `JoinGuard` for it. +/// +/// The join guard can be used to explicitly join the child thread (via +/// `join`), returning `Result`, or it will implicitly join the child +/// upon being dropped. Because the child thread may refer to data on the +/// current thread's stack (hence the "scoped" name), it cannot be detached; +/// it *must* be joined before the relevant stack frame is popped. See the +/// module documentation for additional details. +/// +/// # Panics +/// +/// Panics if the OS fails to create a thread; use `Builder::scoped` +/// to recover from such errors. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn scoped<'a, T, F>(f: F) -> JoinGuard<'a, T> where + T: Send + 'a, F: FnOnce() -> T, F: Send + 'a +{ + Builder::new().scoped(f).unwrap() +} + +/// Gets a handle to the thread that invokes it. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn current() -> Thread { + thread_info::current_thread() +} + +/// Cooperatively give up a timeslice to the OS scheduler. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn yield_now() { + unsafe { imp::yield_now() } +} + +/// Determines whether the current thread is unwinding because of panic. +#[inline] +#[stable(feature = "rust1", since = "1.0.0")] +pub fn panicking() -> bool { + unwind::panicking() +} + +/// Put the current thread to sleep for the specified amount of time. +/// +/// The thread may sleep longer than the duration specified due to scheduling +/// specifics or platform-dependent functionality. 
 Note that on unix platforms +/// this function will not return early due to a signal being received or a +/// spurious wakeup. +#[unstable(feature = "thread_sleep", + reason = "recently added, needs an RFC, and `Duration` itself is \ + unstable")] +pub fn sleep(dur: Duration) { + imp::sleep(dur) +} + +/// Block unless or until the current thread's token is made available (may wake spuriously). +/// +/// See the module doc for more detail. +// +// The implementation currently uses the trivial strategy of a Mutex+Condvar +// with wakeup flag, which does not actually allow spurious wakeups. In the +// future, this will be implemented in a more efficient way, perhaps along the lines of +// http://cr.openjdk.java.net/~stefank/6989984.1/raw_files/new/src/os/linux/vm/os_linux.cpp +// or futexes, and in either case may allow spurious wakeups. +#[stable(feature = "rust1", since = "1.0.0")] +pub fn park() { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + while !*guard { + guard = thread.inner.cvar.wait(guard).unwrap(); + } + *guard = false; +} + +/// Block unless or until the current thread's token is made available or +/// the specified duration has been reached (may wake spuriously). +/// +/// The semantics of this function are equivalent to `park()` except that the +/// thread will be blocked for roughly no longer than *duration*. This method +/// should not be used for precise timing due to anomalies such as +/// preemption or platform differences that may not cause the maximum +/// amount of time waited to be precisely *duration* long. +/// +/// See the module doc for more detail. 
+#[unstable(feature = "std_misc", reason = "recently introduced, depends on Duration")] +pub fn park_timeout(duration: Duration) { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + if !*guard { + let (g, _) = thread.inner.cvar.wait_timeout(guard, duration).unwrap(); + guard = g; + } + *guard = false; +} + +//////////////////////////////////////////////////////////////////////////////// +// Thread +//////////////////////////////////////////////////////////////////////////////// + +/// The internal representation of a `Thread` handle +struct Inner { + name: Option, + lock: Mutex, // true when there is a buffered unpark + cvar: Condvar, +} + +unsafe impl Sync for Inner {} + +#[derive(Clone)] +#[stable(feature = "rust1", since = "1.0.0")] +/// A handle to a thread. +pub struct Thread { + inner: Arc, +} + +impl Thread { + // Used only internally to construct a thread object without spawning + fn new(name: Option) -> Thread { + Thread { + inner: Arc::new(Inner { + name: name, + lock: Mutex::new(false), + cvar: Condvar::new(), + }) + } + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", + reason = "may change with specifics of new Send semantics")] + pub fn spawn(f: F) -> Thread where F: FnOnce(), F: Send + 'static { + Builder::new().spawn(f).unwrap().thread().clone() + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", + reason = "may change with specifics of new Send semantics")] + pub fn scoped<'a, T, F>(f: F) -> JoinGuard<'a, T> where + T: Send + 'a, F: FnOnce() -> T, F: Send + 'a + { + Builder::new().scoped(f).unwrap() + } + + /// Deprecated: use module-level free function. 
+ #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn current() -> Thread { + thread_info::current_thread() + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", reason = "name may change")] + pub fn yield_now() { + unsafe { imp::yield_now() } + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[inline] + #[stable(feature = "rust1", since = "1.0.0")] + pub fn panicking() -> bool { + unwind::panicking() + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", reason = "recently introduced")] + pub fn park() { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + while !*guard { + guard = thread.inner.cvar.wait(guard).unwrap(); + } + *guard = false; + } + + /// Deprecated: use module-level free function. + #[deprecated(since = "1.0.0", reason = "use module-level free function")] + #[unstable(feature = "std_misc", reason = "recently introduced")] + pub fn park_timeout(duration: Duration) { + let thread = current(); + let mut guard = thread.inner.lock.lock().unwrap(); + if !*guard { + let (g, _) = thread.inner.cvar.wait_timeout(guard, duration).unwrap(); + guard = g; + } + *guard = false; + } + + /// Atomically makes the handle's token available if it is not already. + /// + /// See the module doc for more detail. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn unpark(&self) { + let mut guard = self.inner.lock.lock().unwrap(); + if !*guard { + *guard = true; + self.inner.cvar.notify_one(); + } + } + + /// Get the thread's name. 
+ #[stable(feature = "rust1", since = "1.0.0")] + pub fn name(&self) -> Option<&str> { + self.inner.name.as_ref().map(|s| &**s) + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl fmt::Debug for Thread { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.name(), f) + } +} + +// a hack to get around privacy restrictions +impl thread_info::NewThread for Thread { + fn new(name: Option) -> Thread { Thread::new(name) } +} + +//////////////////////////////////////////////////////////////////////////////// +// JoinHandle and JoinGuard +//////////////////////////////////////////////////////////////////////////////// + +/// Indicates the manner in which a thread exited. +/// +/// A thread that completes without panicking is considered to exit successfully. +#[stable(feature = "rust1", since = "1.0.0")] +pub type Result = ::result::Result>; + +struct Packet(Arc>>>); + +unsafe impl Send for Packet {} +unsafe impl Sync for Packet {} + +/// Inner representation for JoinHandle and JoinGuard +struct JoinInner { + native: imp::rust_thread, + thread: Thread, + packet: Packet, + joined: bool, +} + +impl JoinInner { + fn join(&mut self) -> Result { + assert!(!self.joined); + unsafe { imp::join(self.native) }; + self.joined = true; + unsafe { + (*self.packet.0.get()).take().unwrap() + } + } +} + +/// An owned permission to join on a thread (block on its termination). +/// +/// Unlike a `JoinGuard`, a `JoinHandle` *detaches* the child thread +/// when it is dropped, rather than automatically joining on drop. +/// +/// Due to platform restrictions, it is not possible to `Clone` this +/// handle: the ability to join a child thread is a uniquely-owned +/// permission. 
+#[stable(feature = "rust1", since = "1.0.0")] +pub struct JoinHandle(JoinInner<()>); + +impl JoinHandle { + /// Extract a handle to the underlying thread + #[stable(feature = "rust1", since = "1.0.0")] + pub fn thread(&self) -> &Thread { + &self.0.thread + } + + /// Wait for the associated thread to finish. + /// + /// If the child thread panics, `Err` is returned with the parameter given + /// to `panic`. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn join(mut self) -> Result<()> { + self.0.join() + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl Drop for JoinHandle { + fn drop(&mut self) { + if !self.0.joined { + unsafe { imp::detach(self.0.native) } + } + } +} + +/// An RAII-style guard that will block until thread termination when dropped. +/// +/// The type `T` is the return type for the thread's main function. +/// +/// Joining on drop is necessary to ensure memory safety when stack +/// data is shared between a parent and child thread. +/// +/// Due to platform restrictions, it is not possible to `Clone` this +/// handle: the ability to join a child thread is a uniquely-owned +/// permission. +#[must_use = "thread will be immediately joined if `JoinGuard` is not used"] +#[stable(feature = "rust1", since = "1.0.0")] +pub struct JoinGuard<'a, T: 'a> { + inner: JoinInner, + _marker: PhantomData<&'a T>, +} + +#[stable(feature = "rust1", since = "1.0.0")] +unsafe impl<'a, T: Send + 'a> Sync for JoinGuard<'a, T> {} + +impl<'a, T: Send + 'a> JoinGuard<'a, T> { + /// Extract a handle to the thread this guard will join on. + #[stable(feature = "rust1", since = "1.0.0")] + pub fn thread(&self) -> &Thread { + &self.inner.thread + } + + /// Wait for the associated thread to finish, returning the result of the thread's + /// calculation. + /// + /// # Panics + /// + /// Panics on the child thread are propagated by panicking the parent. 
+ #[stable(feature = "rust1", since = "1.0.0")] + pub fn join(mut self) -> T { + match self.inner.join() { + Ok(res) => res, + Err(_) => panic!("child thread {:?} panicked", self.thread()), + } + } +} + +#[stable(feature = "rust1", since = "1.0.0")] +impl JoinGuard<'static, T> { + /// Detaches the child thread, allowing it to outlive its parent. + #[deprecated(since = "1.0.0", reason = "use spawn instead")] + #[unstable(feature = "std_misc")] + pub fn detach(mut self) { + unsafe { imp::detach(self.inner.native) }; + self.inner.joined = true; // avoid joining in the destructor + } +} + +#[unsafe_destructor] +#[stable(feature = "rust1", since = "1.0.0")] +impl<'a, T: Send + 'a> Drop for JoinGuard<'a, T> { + fn drop(&mut self) { + if !self.inner.joined { + if self.inner.join().is_err() { + panic!("child thread {:?} panicked", self.thread()); + } + } + } +} + +//////////////////////////////////////////////////////////////////////////////// +// Tests +//////////////////////////////////////////////////////////////////////////////// + +#[cfg(test)] +mod test { + use prelude::v1::*; + + use any::Any; + use sync::mpsc::{channel, Sender}; + use boxed::BoxAny; + use result; + use std::old_io::{ChanReader, ChanWriter}; + use super::{Builder}; + use thread; + use thunk::Thunk; + use time::Duration; + + // !!! These tests are dangerous. If something is buggy, they will hang, !!! + // !!! instead of exiting cleanly. This might wedge the buildbots. !!! 
+ + #[test] + fn test_unnamed_thread() { + thread::spawn(move|| { + assert!(thread::current().name().is_none()); + }).join().ok().unwrap(); + } + + #[test] + fn test_named_thread() { + Builder::new().name("ada lovelace".to_string()).scoped(move|| { + assert!(thread::current().name().unwrap() == "ada lovelace".to_string()); + }).unwrap().join(); + } + + #[test] + fn test_run_basic() { + let (tx, rx) = channel(); + thread::spawn(move|| { + tx.send(()).unwrap(); + }); + rx.recv().unwrap(); + } + + #[test] + fn test_join_success() { + assert!(thread::scoped(move|| -> String { + "Success!".to_string() + }).join() == "Success!"); + } + + #[test] + fn test_join_panic() { + match thread::spawn(move|| { + panic!() + }).join() { + result::Result::Err(_) => (), + result::Result::Ok(()) => panic!() + } + } + + #[test] + fn test_scoped_success() { + let res = thread::scoped(move|| -> String { + "Success!".to_string() + }).join(); + assert!(res == "Success!"); + } + + #[test] + #[should_fail] + fn test_scoped_panic() { + thread::scoped(|| panic!()).join(); + } + + #[test] + #[should_fail] + fn test_scoped_implicit_panic() { + let _ = thread::scoped(|| panic!()); + } + + #[test] + fn test_spawn_sched() { + use clone::Clone; + + let (tx, rx) = channel(); + + fn f(i: i32, tx: Sender<()>) { + let tx = tx.clone(); + thread::spawn(move|| { + if i == 0 { + tx.send(()).unwrap(); + } else { + f(i - 1, tx); + } + }); + + } + f(10, tx); + rx.recv().unwrap(); + } + + #[test] + fn test_spawn_sched_childs_on_default_sched() { + let (tx, rx) = channel(); + + thread::spawn(move|| { + thread::spawn(move|| { + tx.send(()).unwrap(); + }); + }); + + rx.recv().unwrap(); + } + + fn avoid_copying_the_body(spawnfn: F) where F: FnOnce(Thunk<'static>) { + let (tx, rx) = channel(); + + let x: Box<_> = box 1; + let x_in_parent = (&*x) as *const i32 as usize; + + spawnfn(Thunk::new(move|| { + let x_in_child = (&*x) as *const i32 as usize; + tx.send(x_in_child).unwrap(); + })); + + let x_in_child = 
rx.recv().unwrap(); + assert_eq!(x_in_parent, x_in_child); + } + + #[test] + fn test_avoid_copying_the_body_spawn() { + avoid_copying_the_body(|v| { + thread::spawn(move || v.invoke(())); + }); + } + + #[test] + fn test_avoid_copying_the_body_thread_spawn() { + avoid_copying_the_body(|f| { + thread::spawn(move|| { + f.invoke(()); + }); + }) + } + + #[test] + fn test_avoid_copying_the_body_join() { + avoid_copying_the_body(|f| { + let _ = thread::spawn(move|| { + f.invoke(()) + }).join(); + }) + } + + #[test] + fn test_child_doesnt_ref_parent() { + // If the child refcounts the parent task, this will stack overflow when + // climbing the task tree to dereference each ancestor. (See #1789) + // (well, it would if the constant were 8000+ - I lowered it to be more + // valgrind-friendly. try this at home, instead..!) + const GENERATIONS: u32 = 16; + fn child_no(x: u32) -> Thunk<'static> { + return Thunk::new(move|| { + if x < GENERATIONS { + thread::spawn(move|| child_no(x+1).invoke(())); + } + }); + } + thread::spawn(|| child_no(0).invoke(())); + } + + #[test] + fn test_simple_newsched_spawn() { + thread::spawn(move || {}); + } + + #[test] + fn test_try_panic_message_static_str() { + match thread::spawn(move|| { + panic!("static string"); + }).join() { + Err(e) => { + type T = &'static str; + assert!(e.is::()); + assert_eq!(*e.downcast::().unwrap(), "static string"); + } + Ok(()) => panic!() + } + } + + #[test] + fn test_try_panic_message_owned_str() { + match thread::spawn(move|| { + panic!("owned string".to_string()); + }).join() { + Err(e) => { + type T = String; + assert!(e.is::()); + assert_eq!(*e.downcast::().unwrap(), "owned string".to_string()); + } + Ok(()) => panic!() + } + } + + #[test] + fn test_try_panic_message_any() { + match thread::spawn(move|| { + panic!(box 413u16 as Box); + }).join() { + Err(e) => { + type T = Box; + assert!(e.is::()); + let any = e.downcast::().unwrap(); + assert!(any.is::()); + assert_eq!(*any.downcast::().unwrap(), 413); + } + 
Ok(()) => panic!() + } + } + + #[test] + fn test_try_panic_message_unit_struct() { + struct Juju; + + match thread::spawn(move|| { + panic!(Juju) + }).join() { + Err(ref e) if e.is::() => {} + Err(_) | Ok(()) => panic!() + } + } + + #[test] + fn test_park_timeout_unpark_before() { + for _ in 0..10 { + thread::current().unpark(); + thread::park_timeout(Duration::seconds(10_000_000)); + } + } + + #[test] + fn test_park_timeout_unpark_not_called() { + for _ in 0..10 { + thread::park_timeout(Duration::milliseconds(10)); + } + } + + #[test] + fn test_park_timeout_unpark_called_other_thread() { + use std::old_io; + + for _ in 0..10 { + let th = thread::current(); + + let _guard = thread::spawn(move || { + old_io::timer::sleep(Duration::milliseconds(50)); + th.unpark(); + }); + + thread::park_timeout(Duration::seconds(10_000_000)); + } + } + + #[test] + fn sleep_smoke() { + thread::sleep(Duration::milliseconds(2)); + thread::sleep(Duration::milliseconds(-2)); + } + + // NOTE: the corresponding test for stderr is in run-pass/task-stderr, due + // to the test harness apparently interfering with stderr configuration. +} diff --git a/src/libstd/thread/scoped.rs b/src/libstd/thread/scoped.rs new file mode 100644 index 00000000000..2a8be2ad82c --- /dev/null +++ b/src/libstd/thread/scoped.rs @@ -0,0 +1,317 @@ +// Copyright 2014-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Scoped thread-local storage +//! +//! This module provides the ability to generate *scoped* thread-local +//! variables. In this sense, scoped indicates that thread local storage +//! actually stores a reference to a value, and this reference is only placed +//! in storage for a scoped amount of time. +//! +//! 
There are no restrictions on what types can be placed into a scoped +//! variable, but all scoped variables are initialized to the equivalent of +//! null. Scoped thread local storage is useful when a value is present for a known +//! period of time and it is not required to relinquish ownership of the +//! contents. +//! +//! # Examples +//! +//! ``` +//! scoped_thread_local!(static FOO: u32); +//! +//! // Initially each scoped slot is empty. +//! assert!(!FOO.is_set()); +//! +//! // When inserting a value, the value is only in place for the duration +//! // of the closure specified. +//! FOO.set(&1, || { +//! FOO.with(|slot| { +//! assert_eq!(*slot, 1); +//! }); +//! }); +//! ``` + +#![unstable(feature = "thread_local_internals")] + +use prelude::v1::*; + +// macro hygiene sure would be nice, wouldn't it? +#[doc(hidden)] +pub mod __impl { + pub use super::imp::KeyInner; + pub use sys_common::thread_local::INIT as OS_INIT; +} + +/// Type representing a thread local storage key corresponding to a reference +/// to the type parameter `T`. +/// +/// Keys are statically allocated and can contain a reference to an instance of +/// type `T` scoped to a particular lifetime. Keys provides two methods, `set` +/// and `with`, both of which currently use closures to control the scope of +/// their contents. +#[unstable(feature = "scoped_tls", + reason = "scoped TLS has yet to have wide enough use to fully consider \ + stabilizing its interface")] +pub struct ScopedKey { #[doc(hidden)] pub inner: __impl::KeyInner } + +/// Declare a new scoped thread local storage key. +/// +/// This macro declares a `static` item on which methods are used to get and +/// set the value stored within. +#[macro_export] +#[allow_internal_unstable] +macro_rules! 
scoped_thread_local { + (static $name:ident: $t:ty) => ( + __scoped_thread_local_inner!(static $name: $t); + ); + (pub static $name:ident: $t:ty) => ( + __scoped_thread_local_inner!(pub static $name: $t); + ); +} + +#[macro_export] +#[doc(hidden)] +#[allow_internal_unstable] +macro_rules! __scoped_thread_local_inner { + (static $name:ident: $t:ty) => ( + #[cfg_attr(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")), + thread_local)] + static $name: ::std::thread::ScopedKey<$t> = + __scoped_thread_local_inner!($t); + ); + (pub static $name:ident: $t:ty) => ( + #[cfg_attr(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")), + thread_local)] + pub static $name: ::std::thread::ScopedKey<$t> = + __scoped_thread_local_inner!($t); + ); + ($t:ty) => ({ + use std::thread::ScopedKey as __Key; + + #[cfg(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")))] + const _INIT: __Key<$t> = __Key { + inner: ::std::thread::__scoped::__impl::KeyInner { + inner: ::std::cell::UnsafeCell { value: 0 as *mut _ }, + } + }; + + #[cfg(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64"))] + const _INIT: __Key<$t> = __Key { + inner: ::std::thread::__scoped::__impl::KeyInner { + inner: ::std::thread::__scoped::__impl::OS_INIT, + marker: ::std::marker::PhantomData::<::std::cell::Cell<$t>>, + } + }; + + _INIT + }) +} + +#[unstable(feature = "scoped_tls", + reason = "scoped TLS has yet to have wide enough use to fully consider \ + stabilizing its interface")] +impl ScopedKey { + /// Insert a value into this scoped thread local storage slot for a + /// duration of a closure. + /// + /// While `cb` is running, the value `t` will be returned by `get` unless + /// this function is called recursively inside of `cb`. 
+ /// + /// Upon return, this function will restore the previous value, if any + /// was available. + /// + /// # Examples + /// + /// ``` + /// scoped_thread_local!(static FOO: u32); + /// + /// FOO.set(&100, || { + /// let val = FOO.with(|v| *v); + /// assert_eq!(val, 100); + /// + /// // set can be called recursively + /// FOO.set(&101, || { + /// // ... + /// }); + /// + /// // Recursive calls restore the previous value. + /// let val = FOO.with(|v| *v); + /// assert_eq!(val, 100); + /// }); + /// ``` + pub fn set(&'static self, t: &T, cb: F) -> R where + F: FnOnce() -> R, + { + struct Reset<'a, T: 'a> { + key: &'a __impl::KeyInner, + val: *mut T, + } + #[unsafe_destructor] + impl<'a, T> Drop for Reset<'a, T> { + fn drop(&mut self) { + unsafe { self.key.set(self.val) } + } + } + + let prev = unsafe { + let prev = self.inner.get(); + self.inner.set(t as *const T as *mut T); + prev + }; + + let _reset = Reset { key: &self.inner, val: prev }; + cb() + } + + /// Get a value out of this scoped variable. + /// + /// This function takes a closure which receives the value of this + /// variable. + /// + /// # Panics + /// + /// This function will panic if `set` has not previously been called. + /// + /// # Examples + /// + /// ```no_run + /// scoped_thread_local!(static FOO: u32); + /// + /// FOO.with(|slot| { + /// // work with `slot` + /// }); + /// ``` + pub fn with(&'static self, cb: F) -> R where + F: FnOnce(&T) -> R + { + unsafe { + let ptr = self.inner.get(); + assert!(!ptr.is_null(), "cannot access a scoped thread local \ + variable without calling `set` first"); + cb(&*ptr) + } + } + + /// Test whether this TLS key has been `set` for the current thread. 
+ pub fn is_set(&'static self) -> bool { + unsafe { !self.inner.get().is_null() } + } +} + +#[cfg(not(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64")))] +mod imp { + use std::cell::UnsafeCell; + + #[doc(hidden)] + pub struct KeyInner { pub inner: UnsafeCell<*mut T> } + + unsafe impl ::marker::Sync for KeyInner { } + + #[doc(hidden)] + impl KeyInner { + #[doc(hidden)] + pub unsafe fn set(&self, ptr: *mut T) { *self.inner.get() = ptr; } + #[doc(hidden)] + pub unsafe fn get(&self) -> *mut T { *self.inner.get() } + } +} + +#[cfg(any(windows, + target_os = "android", + target_os = "ios", + target_os = "openbsd", + target_arch = "aarch64"))] +mod imp { + use marker; + use std::cell::Cell; + use sys_common::thread_local::StaticKey as OsStaticKey; + + #[doc(hidden)] + pub struct KeyInner { + pub inner: OsStaticKey, + pub marker: marker::PhantomData>, + } + + unsafe impl ::marker::Sync for KeyInner { } + + #[doc(hidden)] + impl KeyInner { + #[doc(hidden)] + pub unsafe fn set(&self, ptr: *mut T) { self.inner.set(ptr as *mut _) } + #[doc(hidden)] + pub unsafe fn get(&self) -> *mut T { self.inner.get() as *mut _ } + } +} + + +#[cfg(test)] +mod tests { + use cell::Cell; + use prelude::v1::*; + + scoped_thread_local!(static FOO: u32); + + #[test] + fn smoke() { + scoped_thread_local!(static BAR: u32); + + assert!(!BAR.is_set()); + BAR.set(&1, || { + assert!(BAR.is_set()); + BAR.with(|slot| { + assert_eq!(*slot, 1); + }); + }); + assert!(!BAR.is_set()); + } + + #[test] + fn cell_allowed() { + scoped_thread_local!(static BAR: Cell); + + BAR.set(&Cell::new(1), || { + BAR.with(|slot| { + assert_eq!(slot.get(), 1); + }); + }); + } + + #[test] + fn scope_item_allowed() { + assert!(!FOO.is_set()); + FOO.set(&1, || { + assert!(FOO.is_set()); + FOO.with(|slot| { + assert_eq!(*slot, 1); + }); + }); + assert!(!FOO.is_set()); + } +} -- cgit 1.4.1-3-g733a5 From 29b54387b88bdf43c00849e3483c2297723f5a73 Mon Sep 17 00:00:00 2001 
From: Alex Crichton Date: Mon, 23 Mar 2015 15:54:39 -0700 Subject: Test fixes and rebase conflicts, round 2 --- src/doc/trpl/documentation.md | 3 +- src/liballoc/arc.rs | 76 +++++++++++++----------- src/liballoc/rc.rs | 90 ++++++++++++++++------------- src/libcollections/vec.rs | 32 ++++++---- src/libcollectionstest/lib.rs | 1 + src/libgraphviz/lib.rs | 40 ++++++------- src/librustc_back/lib.rs | 1 - src/librustc_back/rpath.rs | 4 +- src/librustc_lint/lib.rs | 1 - src/librustc_trans/trans/asm.rs | 2 +- src/librustc_typeck/lib.rs | 1 - src/libstd/env.rs | 5 +- src/libstd/path.rs | 23 ++++---- src/libstd/process.rs | 2 +- src/libstd/sys/unix/thread.rs | 5 +- src/libstd/sys/windows/fs2.rs | 2 +- src/libstd/sys/windows/mod.rs | 4 +- src/libstd/sys/windows/os.rs | 5 +- src/libstd/thread/scoped.rs | 6 +- src/test/run-make/issue-19371/foo.rs | 6 +- src/test/run-pass/create-dir-all-bare.rs | 2 + src/test/run-pass/issue-20797.rs | 8 +-- src/test/run-pass/send_str_hashmap.rs | 2 +- src/test/run-pass/send_str_treemap.rs | 2 +- src/test/run-pass/tcp-stress.rs | 3 + src/test/run-pass/ufcs-polymorphic-paths.rs | 2 +- 26 files changed, 179 insertions(+), 149 deletions(-) (limited to 'src/libstd/thread') diff --git a/src/doc/trpl/documentation.md b/src/doc/trpl/documentation.md index 7a459ad354d..54821e3ce30 100644 --- a/src/doc/trpl/documentation.md +++ b/src/doc/trpl/documentation.md @@ -361,7 +361,8 @@ Here’s an example of documenting a macro: #[macro_export] macro_rules! panic_unless { ($condition:expr, $($rest:expr),+) => ({ if ! $condition { panic!($($rest),+); } }); -} +} +# fn main() {} ``` You’ll note three things: we need to add our own `extern crate` line, so that diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs index 97d3f78f67c..c9bbc0d74cd 100644 --- a/src/liballoc/arc.rs +++ b/src/liballoc/arc.rs @@ -128,8 +128,8 @@ unsafe impl Sync for Arc { } /// A weak pointer to an `Arc`. 
/// -/// Weak pointers will not keep the data inside of the `Arc` alive, and can be used to break cycles -/// between `Arc` pointers. +/// Weak pointers will not keep the data inside of the `Arc` alive, and can be +/// used to break cycles between `Arc` pointers. #[unsafe_no_drop_flag] #[unstable(feature = "alloc", reason = "Weak pointers may not belong in this module.")] @@ -218,8 +218,8 @@ impl Arc { unsafe fn drop_slow(&mut self) { let ptr = *self._ptr; - // Destroy the data at this time, even though we may not free the box allocation itself - // (there may still be weak pointers lying around). + // Destroy the data at this time, even though we may not free the box + // allocation itself (there may still be weak pointers lying around). drop(ptr::read(&self.inner().data)); if self.inner().weak.fetch_sub(1, Release) == 1 { @@ -286,8 +286,8 @@ impl Deref for Arc { impl Arc { /// Make a mutable reference from the given `Arc`. /// - /// This is also referred to as a copy-on-write operation because the inner data is cloned if - /// the reference count is greater than one. + /// This is also referred to as a copy-on-write operation because the inner + /// data is cloned if the reference count is greater than one. /// /// # Examples /// @@ -302,16 +302,18 @@ impl Arc { #[inline] #[unstable(feature = "alloc")] pub fn make_unique(&mut self) -> &mut T { - // Note that we hold a strong reference, which also counts as a weak reference, so we only - // clone if there is an additional reference of either kind. + // Note that we hold a strong reference, which also counts as a weak + // reference, so we only clone if there is an additional reference of + // either kind. if self.inner().strong.load(SeqCst) != 1 || self.inner().weak.load(SeqCst) != 1 { *self = Arc::new((**self).clone()) } - // This unsafety is ok because we're guaranteed that the pointer returned is the *only* - // pointer that will ever be returned to T. 
Our reference count is guaranteed to be 1 at - // this point, and we required the Arc itself to be `mut`, so we're returning the only - // possible reference to the inner data. + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the Arc itself to be `mut`, so we're returning the only possible + // reference to the inner data. let inner = unsafe { &mut **self._ptr }; &mut inner.data } @@ -322,8 +324,9 @@ impl Arc { impl Drop for Arc { /// Drops the `Arc`. /// - /// This will decrement the strong reference count. If the strong reference count becomes zero - /// and the only other references are `Weak` ones, `drop`s the inner value. + /// This will decrement the strong reference count. If the strong reference + /// count becomes zero and the only other references are `Weak` ones, + /// `drop`s the inner value. /// /// # Examples /// @@ -347,29 +350,32 @@ impl Drop for Arc { /// ``` #[inline] fn drop(&mut self) { - // This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but - // it is guaranteed to be zeroed after the first if it's run more than once) + // This structure has #[unsafe_no_drop_flag], so this drop glue may run + // more than once (but it is guaranteed to be zeroed after the first if + // it's run more than once) let ptr = *self._ptr; if ptr.is_null() { return } - // Because `fetch_sub` is already atomic, we do not need to synchronize with other threads - // unless we are going to delete the object. This same logic applies to the below - // `fetch_sub` to the `weak` count. + // Because `fetch_sub` is already atomic, we do not need to synchronize + // with other threads unless we are going to delete the object. This + // same logic applies to the below `fetch_sub` to the `weak` count. 
if self.inner().strong.fetch_sub(1, Release) != 1 { return } - // This fence is needed to prevent reordering of use of the data and deletion of the data. - // Because it is marked `Release`, the decreasing of the reference count synchronizes with - // this `Acquire` fence. This means that use of the data happens before decreasing the - // reference count, which happens before this fence, which happens before the deletion of - // the data. + // This fence is needed to prevent reordering of use of the data and + // deletion of the data. Because it is marked `Release`, the decreasing + // of the reference count synchronizes with this `Acquire` fence. This + // means that use of the data happens before decreasing the reference + // count, which happens before this fence, which happens before the + // deletion of the data. // // As explained in the [Boost documentation][1], // - // > It is important to enforce any possible access to the object in one thread (through an - // > existing reference) to *happen before* deleting the object in a different thread. This - // > is achieved by a "release" operation after dropping a reference (any access to the - // > object through this reference must obviously happened before), and an "acquire" - // > operation before deleting the object. + // > It is important to enforce any possible access to the object in one + // > thread (through an existing reference) to *happen before* deleting + // > the object in a different thread. This is achieved by a "release" + // > operation after dropping a reference (any access to the object + // > through this reference must obviously happened before), and an + // > "acquire" operation before deleting the object. // // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html) atomic::fence(Acquire); @@ -387,7 +393,8 @@ impl Weak { /// /// Upgrades the `Weak` reference to an `Arc`, if possible. /// - /// Returns `None` if there were no strong references and the data was destroyed. 
+ /// Returns `None` if there were no strong references and the data was + /// destroyed. /// /// # Examples /// @@ -402,8 +409,8 @@ impl Weak { /// let strong_five: Option> = weak_five.upgrade(); /// ``` pub fn upgrade(&self) -> Option> { - // We use a CAS loop to increment the strong count instead of a fetch_add because once the - // count hits 0 is must never be above 0. + // We use a CAS loop to increment the strong count instead of a + // fetch_add because once the count hits 0 is must never be above 0. let inner = self.inner(); loop { let n = inner.strong.load(SeqCst); @@ -480,8 +487,9 @@ impl Drop for Weak { // see comments above for why this check is here if ptr.is_null() { return } - // If we find out that we were the last weak pointer, then its time to deallocate the data - // entirely. See the discussion in Arc::drop() about the memory orderings + // If we find out that we were the last weak pointer, then its time to + // deallocate the data entirely. See the discussion in Arc::drop() about + // the memory orderings if self.inner().weak.fetch_sub(1, Release) == 1 { atomic::fence(Acquire); unsafe { deallocate(ptr as *mut u8, size_of::>(), diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index e4b09bba529..eb3c5c16726 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -59,12 +59,12 @@ //! //! drop(gadget_owner); //! -//! // Despite dropping gadget_owner, we're still able to print out the name of -//! // the Owner of the Gadgets. This is because we've only dropped the +//! // Despite dropping gadget_owner, we're still able to print out the name +//! // of the Owner of the Gadgets. This is because we've only dropped the //! // reference count object, not the Owner it wraps. As long as there are -//! // other `Rc` objects pointing at the same Owner, it will remain allocated. Notice -//! // that the `Rc` wrapper around Gadget.owner gets automatically dereferenced -//! // for us. +//! 
// other `Rc` objects pointing at the same Owner, it will remain +//! // allocated. Notice that the `Rc` wrapper around Gadget.owner gets +//! // automatically dereferenced for us. //! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name); //! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name); //! @@ -74,19 +74,22 @@ //! } //! ``` //! -//! If our requirements change, and we also need to be able to traverse from Owner → Gadget, we -//! will run into problems: an `Rc` pointer from Owner → Gadget introduces a cycle between the -//! objects. This means that their reference counts can never reach 0, and the objects will remain -//! allocated: a memory leak. In order to get around this, we can use `Weak` pointers. These -//! pointers don't contribute to the total count. +//! If our requirements change, and we also need to be able to traverse from +//! Owner → Gadget, we will run into problems: an `Rc` pointer from Owner +//! → Gadget introduces a cycle between the objects. This means that their +//! reference counts can never reach 0, and the objects will remain allocated: a +//! memory leak. In order to get around this, we can use `Weak` pointers. +//! These pointers don't contribute to the total count. //! -//! Rust actually makes it somewhat difficult to produce this loop in the first place: in order to -//! end up with two objects that point at each other, one of them needs to be mutable. This is -//! problematic because `Rc` enforces memory safety by only giving out shared references to the -//! object it wraps, and these don't allow direct mutation. We need to wrap the part of the object -//! we wish to mutate in a `RefCell`, which provides *interior mutability*: a method to achieve -//! mutability through a shared reference. `RefCell` enforces Rust's borrowing rules at runtime. -//! Read the `Cell` documentation for more details on interior mutability. +//! Rust actually makes it somewhat difficult to produce this loop in the first +//! 
place: in order to end up with two objects that point at each other, one of +//! them needs to be mutable. This is problematic because `Rc` enforces +//! memory safety by only giving out shared references to the object it wraps, +//! and these don't allow direct mutation. We need to wrap the part of the +//! object we wish to mutate in a `RefCell`, which provides *interior +//! mutability*: a method to achieve mutability through a shared reference. +//! `RefCell` enforces Rust's borrowing rules at runtime. Read the `Cell` +//! documentation for more details on interior mutability. //! //! ```rust //! # #![feature(alloc)] @@ -130,9 +133,10 @@ //! for gadget_opt in gadget_owner.gadgets.borrow().iter() { //! //! // gadget_opt is a Weak. Since weak pointers can't guarantee -//! // that their object is still allocated, we need to call upgrade() on them -//! // to turn them into a strong reference. This returns an Option, which -//! // contains a reference to our object if it still exists. +//! // that their object is still allocated, we need to call upgrade() +//! // on them to turn them into a strong reference. This returns an +//! // Option, which contains a reference to our object if it still +//! // exists. //! let gadget = gadget_opt.upgrade().unwrap(); //! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name); //! 
} @@ -180,8 +184,8 @@ struct RcBox { #[unsafe_no_drop_flag] #[stable(feature = "rust1", since = "1.0.0")] pub struct Rc { - // FIXME #12808: strange names to try to avoid interfering with field accesses of the contained - // type via Deref + // FIXME #12808: strange names to try to avoid interfering with field + // accesses of the contained type via Deref _ptr: NonZero<*mut RcBox>, } @@ -203,9 +207,10 @@ impl Rc { pub fn new(value: T) -> Rc { unsafe { Rc { - // there is an implicit weak pointer owned by all the strong pointers, which - // ensures that the weak destructor never frees the allocation while the strong - // destructor is running, even if the weak pointer is stored inside the strong one. + // there is an implicit weak pointer owned by all the strong + // pointers, which ensures that the weak destructor never frees + // the allocation while the strong destructor is running, even + // if the weak pointer is stored inside the strong one. _ptr: NonZero::new(boxed::into_raw(box RcBox { value: value, strong: Cell::new(1), @@ -245,7 +250,8 @@ pub fn weak_count(this: &Rc) -> usize { this.weak() - 1 } #[unstable(feature = "alloc")] pub fn strong_count(this: &Rc) -> usize { this.strong() } -/// Returns true if there are no other `Rc` or `Weak` values that share the same inner value. +/// Returns true if there are no other `Rc` or `Weak` values that share the +/// same inner value. /// /// # Examples /// @@ -330,8 +336,8 @@ pub fn get_mut<'a, T>(rc: &'a mut Rc) -> Option<&'a mut T> { impl Rc { /// Make a mutable reference from the given `Rc`. /// - /// This is also referred to as a copy-on-write operation because the inner data is cloned if - /// the reference count is greater than one. + /// This is also referred to as a copy-on-write operation because the inner + /// data is cloned if the reference count is greater than one. 
/// /// # Examples /// @@ -349,10 +355,11 @@ impl Rc { if !is_unique(self) { *self = Rc::new((**self).clone()) } - // This unsafety is ok because we're guaranteed that the pointer returned is the *only* - // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at - // this point, and we required the `Rc` itself to be `mut`, so we're returning the only - // possible reference to the inner value. + // This unsafety is ok because we're guaranteed that the pointer + // returned is the *only* pointer that will ever be returned to T. Our + // reference count is guaranteed to be 1 at this point, and we required + // the `Rc` itself to be `mut`, so we're returning the only possible + // reference to the inner value. let inner = unsafe { &mut **self._ptr }; &mut inner.value } @@ -373,8 +380,9 @@ impl Deref for Rc { impl Drop for Rc { /// Drops the `Rc`. /// - /// This will decrement the strong reference count. If the strong reference count becomes zero - /// and the only other references are `Weak` ones, `drop`s the inner value. + /// This will decrement the strong reference count. If the strong reference + /// count becomes zero and the only other references are `Weak` ones, + /// `drop`s the inner value. /// /// # Examples /// @@ -404,8 +412,8 @@ impl Drop for Rc { if self.strong() == 0 { ptr::read(&**self); // destroy the contained object - // remove the implicit "strong weak" pointer now that we've destroyed the - // contents. + // remove the implicit "strong weak" pointer now that we've + // destroyed the contents. self.dec_weak(); if self.weak() == 0 { @@ -627,7 +635,8 @@ impl fmt::Debug for Rc { /// A weak version of `Rc`. /// -/// Weak references do not count when determining if the inner value should be dropped. +/// Weak references do not count when determining if the inner value should be +/// dropped. /// /// See the [module level documentation](./index.html) for more. 
#[unsafe_no_drop_flag] @@ -652,7 +661,8 @@ impl Weak { /// /// Upgrades the `Weak` reference to an `Rc`, if possible. /// - /// Returns `None` if there were no strong references and the data was destroyed. + /// Returns `None` if there were no strong references and the data was + /// destroyed. /// /// # Examples /// @@ -710,8 +720,8 @@ impl Drop for Weak { let ptr = *self._ptr; if !ptr.is_null() { self.dec_weak(); - // the weak count starts at 1, and will only go to zero if all the strong pointers - // have disappeared. + // the weak count starts at 1, and will only go to zero if all + // the strong pointers have disappeared. if self.weak() == 0 { deallocate(ptr as *mut u8, size_of::>(), min_align_of::>()) diff --git a/src/libcollections/vec.rs b/src/libcollections/vec.rs index e360c0b840b..59819d01bc6 100644 --- a/src/libcollections/vec.rs +++ b/src/libcollections/vec.rs @@ -8,7 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! A growable list type with heap-allocated contents, written `Vec` but pronounced 'vector.' +//! A growable list type with heap-allocated contents, written `Vec` but +//! pronounced 'vector.' //! //! Vectors have `O(1)` indexing, push (to the end) and pop (from the end). //! @@ -124,17 +125,19 @@ use borrow::{Cow, IntoCow}; /// /// # Capacity and reallocation /// -/// The capacity of a vector is the amount of space allocated for any future elements that will be -/// added onto the vector. This is not to be confused with the *length* of a vector, which -/// specifies the number of actual elements within the vector. If a vector's length exceeds its -/// capacity, its capacity will automatically be increased, but its elements will have to be +/// The capacity of a vector is the amount of space allocated for any future +/// elements that will be added onto the vector. 
This is not to be confused with +/// the *length* of a vector, which specifies the number of actual elements +/// within the vector. If a vector's length exceeds its capacity, its capacity +/// will automatically be increased, but its elements will have to be /// reallocated. /// -/// For example, a vector with capacity 10 and length 0 would be an empty vector with space for 10 -/// more elements. Pushing 10 or fewer elements onto the vector will not change its capacity or -/// cause reallocation to occur. However, if the vector's length is increased to 11, it will have -/// to reallocate, which can be slow. For this reason, it is recommended to use -/// `Vec::with_capacity` whenever possible to specify how big the vector is expected to get. +/// For example, a vector with capacity 10 and length 0 would be an empty vector +/// with space for 10 more elements. Pushing 10 or fewer elements onto the +/// vector will not change its capacity or cause reallocation to occur. However, +/// if the vector's length is increased to 11, it will have to reallocate, which +/// can be slow. For this reason, it is recommended to use `Vec::with_capacity` +/// whenever possible to specify how big the vector is expected to get. 
#[unsafe_no_drop_flag] #[stable(feature = "rust1", since = "1.0.0")] pub struct Vec { @@ -1429,7 +1432,7 @@ impl ops::Index for Vec { #[cfg(not(stage0))] #[inline] fn index(&self, _index: ops::RangeFull) -> &[T] { - self.as_slice() + self } } @@ -1733,15 +1736,20 @@ impl AsRef<[T]> for Vec { #[stable(feature = "rust1", since = "1.0.0")] impl<'a, T: Clone> From<&'a [T]> for Vec { + #[cfg(not(test))] fn from(s: &'a [T]) -> Vec { s.to_vec() } + #[cfg(test)] + fn from(s: &'a [T]) -> Vec { + ::slice::to_vec(s) + } } #[stable(feature = "rust1", since = "1.0.0")] impl<'a> From<&'a str> for Vec { fn from(s: &'a str) -> Vec { - s.as_bytes().to_vec() + From::from(s.as_bytes()) } } diff --git a/src/libcollectionstest/lib.rs b/src/libcollectionstest/lib.rs index 365ef637a4c..f03a073e274 100644 --- a/src/libcollectionstest/lib.rs +++ b/src/libcollectionstest/lib.rs @@ -20,6 +20,7 @@ #![feature(unboxed_closures)] #![feature(unicode)] #![feature(unsafe_destructor)] +#![feature(into_cow)] #![cfg_attr(test, feature(str_char))] #[macro_use] extern crate log; diff --git a/src/libgraphviz/lib.rs b/src/libgraphviz/lib.rs index 9a6e77af28e..ccf4a3f48d9 100644 --- a/src/libgraphviz/lib.rs +++ b/src/libgraphviz/lib.rs @@ -47,13 +47,13 @@ //! which is cyclic. //! //! ```rust -//! # #![feature(rustc_private, core)] +//! # #![feature(rustc_private, core, into_cow)] //! use std::borrow::IntoCow; //! use std::io::Write; //! use graphviz as dot; //! -//! type Nd = int; -//! type Ed = (int,int); +//! type Nd = isize; +//! type Ed = (isize,isize); //! struct Edges(Vec); //! //! pub fn render_to(output: &mut W) { @@ -133,7 +133,7 @@ //! direct reference to the `(source,target)` pair stored in the graph's //! internal vector (rather than passing around a copy of the pair //! itself). Note that this implies that `fn edges(&'a self)` must -//! construct a fresh `Vec<&'a (uint,uint)>` from the `Vec<(uint,uint)>` +//! construct a fresh `Vec<&'a (usize,usize)>` from the `Vec<(usize,usize)>` //! 
edges stored in `self`. //! //! Since both the set of nodes and the set of edges are always @@ -149,14 +149,14 @@ //! entity `&sube`). //! //! ```rust -//! # #![feature(rustc_private, core)] +//! # #![feature(rustc_private, core, into_cow)] //! use std::borrow::IntoCow; //! use std::io::Write; //! use graphviz as dot; //! -//! type Nd = uint; -//! type Ed<'a> = &'a (uint, uint); -//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> } +//! type Nd = usize; +//! type Ed<'a> = &'a (usize, usize); +//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(usize,usize)> } //! //! pub fn render_to(output: &mut W) { //! let nodes = vec!("{x,y}","{x}","{y}","{}"); @@ -207,14 +207,14 @@ //! Hasse-diagram for the subsets of the set `{x, y}`. //! //! ```rust -//! # #![feature(rustc_private, core)] +//! # #![feature(rustc_private, core, into_cow)] //! use std::borrow::IntoCow; //! use std::io::Write; //! use graphviz as dot; //! -//! type Nd<'a> = (uint, &'a str); +//! type Nd<'a> = (usize, &'a str); //! type Ed<'a> = (Nd<'a>, Nd<'a>); -//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(uint,uint)> } +//! struct Graph { nodes: Vec<&'static str>, edges: Vec<(usize,usize)> } //! //! pub fn render_to(output: &mut W) { //! let nodes = vec!("{x,y}","{x}","{y}","{}"); @@ -231,7 +231,7 @@ //! } //! fn node_label<'b>(&'b self, n: &Nd<'b>) -> dot::LabelText<'b> { //! let &(i, _) = n; -//! dot::LabelText::LabelStr(self.nodes[i].as_slice().into_cow()) +//! dot::LabelText::LabelStr(self.nodes[i].into_cow()) //! } //! fn edge_label<'b>(&'b self, _: &Ed<'b>) -> dot::LabelText<'b> { //! dot::LabelText::LabelStr("⊆".into_cow()) @@ -240,12 +240,12 @@ //! //! impl<'a> dot::GraphWalk<'a, Nd<'a>, Ed<'a>> for Graph { //! fn nodes(&'a self) -> dot::Nodes<'a,Nd<'a>> { -//! self.nodes.iter().map(|s|s.as_slice()).enumerate().collect() +//! self.nodes.iter().map(|s| &s[..]).enumerate().collect() //! } //! fn edges(&'a self) -> dot::Edges<'a,Ed<'a>> { //! self.edges.iter() -//! 
.map(|&(i,j)|((i, self.nodes[i].as_slice()), -//! (j, self.nodes[j].as_slice()))) +//! .map(|&(i,j)|((i, &self.nodes[i][..]), +//! (j, &self.nodes[j][..]))) //! .collect() //! } //! fn source(&self, e: &Ed<'a>) -> Nd<'a> { let &(s,_) = e; s } @@ -385,7 +385,7 @@ impl<'a> Id<'a> { is_letter_or_underscore(c) || in_range('0', c, '9') } fn in_range(low: char, c: char, high: char) -> bool { - low as uint <= c as uint && c as uint <= high as uint + low as usize <= c as usize && c as usize <= high as usize } } @@ -602,12 +602,12 @@ mod tests { use std::iter::repeat; /// each node is an index in a vector in the graph. - type Node = uint; + type Node = usize; struct Edge { - from: uint, to: uint, label: &'static str + from: usize, to: usize, label: &'static str } - fn edge(from: uint, to: uint, label: &'static str) -> Edge { + fn edge(from: usize, to: usize, label: &'static str) -> Edge { Edge { from: from, to: to, label: label } } @@ -637,7 +637,7 @@ mod tests { enum NodeLabels { AllNodesLabelled(Vec), - UnlabelledNodes(uint), + UnlabelledNodes(usize), SomeNodesLabelled(Vec>), } diff --git a/src/librustc_back/lib.rs b/src/librustc_back/lib.rs index b2e12a91ec8..63727a573a3 100644 --- a/src/librustc_back/lib.rs +++ b/src/librustc_back/lib.rs @@ -47,7 +47,6 @@ #![feature(rand)] #![feature(path_ext)] #![feature(std_misc)] -#![feature(path_relative_from)] #![feature(step_by)] #![feature(convert)] #![cfg_attr(test, feature(test, rand))] diff --git a/src/librustc_back/rpath.rs b/src/librustc_back/rpath.rs index 10d17e266ed..ff3f0b78f91 100644 --- a/src/librustc_back/rpath.rs +++ b/src/librustc_back/rpath.rs @@ -228,7 +228,7 @@ mod test { used_crates: Vec::new(), has_rpath: true, is_like_osx: true, - out_filename: PathBuf::new("bin/rustc"), + out_filename: PathBuf::from("bin/rustc"), get_install_prefix_lib_path: &mut || panic!(), realpath: &mut |p| Ok(p.to_path_buf()), }; @@ -238,7 +238,7 @@ mod test { } else { let config = &mut RPathConfig { used_crates: Vec::new(), - 
out_filename: PathBuf::new("bin/rustc"), + out_filename: PathBuf::from("bin/rustc"), get_install_prefix_lib_path: &mut || panic!(), has_rpath: true, is_like_osx: false, diff --git a/src/librustc_lint/lib.rs b/src/librustc_lint/lib.rs index 99b3393c003..ef65acf8b13 100644 --- a/src/librustc_lint/lib.rs +++ b/src/librustc_lint/lib.rs @@ -40,7 +40,6 @@ #![feature(rustc_private)] #![feature(unsafe_destructor)] #![feature(staged_api)] -#![feature(std_misc)] #![feature(str_char)] #![cfg_attr(test, feature(test))] diff --git a/src/librustc_trans/trans/asm.rs b/src/librustc_trans/trans/asm.rs index 33817bb952e..d6c85e8b173 100644 --- a/src/librustc_trans/trans/asm.rs +++ b/src/librustc_trans/trans/asm.rs @@ -81,7 +81,7 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm) // Default per-arch clobbers // Basically what clang does - let arch_clobbers = match bcx.sess().target.target.arch.as_slice() { + let arch_clobbers = match &bcx.sess().target.target.arch[..] { "x86" | "x86_64" => vec!("~{dirflag}", "~{fpsr}", "~{flags}"), _ => Vec::new() }; diff --git a/src/librustc_typeck/lib.rs b/src/librustc_typeck/lib.rs index 6bdfb17ec1c..4e7e63a5d77 100644 --- a/src/librustc_typeck/lib.rs +++ b/src/librustc_typeck/lib.rs @@ -80,7 +80,6 @@ This API is completely unstable and subject to change. 
#![feature(collections)] #![feature(core)] #![feature(int_uint)] -#![feature(std_misc)] #![feature(quote)] #![feature(rustc_diagnostic_macros)] #![feature(rustc_private)] diff --git a/src/libstd/env.rs b/src/libstd/env.rs index 00ce6917835..fd7532ea4a7 100644 --- a/src/libstd/env.rs +++ b/src/libstd/env.rs @@ -327,12 +327,13 @@ pub struct JoinPathsError { /// # Examples /// /// ``` +/// # #![feature(convert)] /// use std::env; /// use std::path::PathBuf; /// /// if let Some(path) = env::var_os("PATH") { /// let mut paths = env::split_paths(&path).collect::>(); -/// paths.push(PathBuf::new("/home/xyz/bin")); +/// paths.push(PathBuf::from("/home/xyz/bin")); /// let new_path = env::join_paths(paths.iter()).unwrap(); /// env::set_var("PATH", &new_path); /// } @@ -853,7 +854,7 @@ mod tests { fn split_paths_unix() { fn check_parse(unparsed: &str, parsed: &[&str]) -> bool { split_paths(unparsed).collect::>() == - parsed.iter().map(|s| PathBuf::new(*s)).collect::>() + parsed.iter().map(|s| PathBuf::from(*s)).collect::>() } assert!(check_parse("", &mut [""])); diff --git a/src/libstd/path.rs b/src/libstd/path.rs index 8ee33e94fe7..50f79967f55 100644 --- a/src/libstd/path.rs +++ b/src/libstd/path.rs @@ -35,9 +35,10 @@ //! To build or modify paths, use `PathBuf`: //! //! ```rust +//! # #![feature(convert)] //! use std::path::PathBuf; //! -//! let mut path = PathBuf::new("c:\\"); +//! let mut path = PathBuf::from("c:\\"); //! path.push("windows"); //! path.push("system32"); //! 
path.set_extension("dll"); @@ -892,9 +893,10 @@ impl<'a> cmp::Ord for Components<'a> { /// # Examples /// /// ``` +/// # #![feature(convert)] /// use std::path::PathBuf; /// -/// let mut path = PathBuf::new("c:\\"); +/// let mut path = PathBuf::from("c:\\"); /// path.push("windows"); /// path.push("system32"); /// path.set_extension("dll"); @@ -983,15 +985,16 @@ impl PathBuf { /// # Examples /// /// ``` + /// # #![feature(convert)] /// use std::path::PathBuf; /// - /// let mut buf = PathBuf::new("/"); + /// let mut buf = PathBuf::from("/"); /// assert!(buf.file_name() == None); /// buf.set_file_name("bar"); - /// assert!(buf == PathBuf::new("/bar")); + /// assert!(buf == PathBuf::from("/bar")); /// assert!(buf.file_name().is_some()); /// buf.set_file_name("baz.txt"); - /// assert!(buf == PathBuf::new("/baz.txt")); + /// assert!(buf == PathBuf::from("/baz.txt")); /// ``` #[stable(feature = "rust1", since = "1.0.0")] pub fn set_file_name>(&mut self, file_name: S) { @@ -1661,7 +1664,7 @@ mod tests { let static_path = Path::new("/home/foo"); let static_cow_path: Cow<'static, Path> = static_path.into_cow(); - let pathbuf = PathBuf::new("/home/foo"); + let pathbuf = PathBuf::from("/home/foo"); { let path: &Path = &pathbuf; @@ -2543,7 +2546,7 @@ mod tests { pub fn test_push() { macro_rules! tp( ($path:expr, $push:expr, $expected:expr) => ( { - let mut actual = PathBuf::new($path); + let mut actual = PathBuf::from($path); actual.push($push); assert!(actual.to_str() == Some($expected), "pushing {:?} onto {:?}: Expected {:?}, got {:?}", @@ -2631,7 +2634,7 @@ mod tests { pub fn test_pop() { macro_rules! tp( ($path:expr, $expected:expr, $output:expr) => ( { - let mut actual = PathBuf::new($path); + let mut actual = PathBuf::from($path); let output = actual.pop(); assert!(actual.to_str() == Some($expected) && output == $output, "popping from {:?}: Expected {:?}/{:?}, got {:?}/{:?}", @@ -2685,7 +2688,7 @@ mod tests { pub fn test_set_file_name() { macro_rules! 
tfn( ($path:expr, $file:expr, $expected:expr) => ( { - let mut p = PathBuf::new($path); + let mut p = PathBuf::from($path); p.set_file_name($file); assert!(p.to_str() == Some($expected), "setting file name of {:?} to {:?}: Expected {:?}, got {:?}", @@ -2719,7 +2722,7 @@ mod tests { pub fn test_set_extension() { macro_rules! tfe( ($path:expr, $ext:expr, $expected:expr, $output:expr) => ( { - let mut p = PathBuf::new($path); + let mut p = PathBuf::from($path); let output = p.set_extension($ext); assert!(p.to_str() == Some($expected) && output == $output, "setting extension of {:?} to {:?}: Expected {:?}/{:?}, got {:?}/{:?}", diff --git a/src/libstd/process.rs b/src/libstd/process.rs index d11c3d22144..553412c8371 100644 --- a/src/libstd/process.rs +++ b/src/libstd/process.rs @@ -770,7 +770,7 @@ mod tests { // test changing to the parent of os::getcwd() because we know // the path exists (and os::getcwd() is not expected to be root) let parent_dir = os::getcwd().unwrap().dir_path(); - let result = pwd_cmd().current_dir(&parent_dir).output().unwrap(); + let result = pwd_cmd().current_dir(parent_dir.as_str().unwrap()).output().unwrap(); let output = String::from_utf8(result.stdout).unwrap(); let child_dir = old_path::Path::new(output.trim()); diff --git a/src/libstd/sys/unix/thread.rs b/src/libstd/sys/unix/thread.rs index eb2a6dc08bf..eb61f21aacd 100644 --- a/src/libstd/sys/unix/thread.rs +++ b/src/libstd/sys/unix/thread.rs @@ -13,14 +13,12 @@ use core::prelude::*; use cmp; -use dynamic_lib::DynamicLibrary; use ffi::CString; use io; use libc::consts::os::posix01::PTHREAD_STACK_MIN; use libc; use mem; use ptr; -use sync::{Once, ONCE_INIT}; use sys::os; use thunk::Thunk; use time::Duration; @@ -322,6 +320,9 @@ pub fn sleep(dur: Duration) { // dependency on libc6 (#23628). 
#[cfg(target_os = "linux")] fn min_stack_size(attr: *const libc::pthread_attr_t) -> libc::size_t { + use dynamic_lib::DynamicLibrary; + use sync::{Once, ONCE_INIT}; + type F = unsafe extern "C" fn(*const libc::pthread_attr_t) -> libc::size_t; static INIT: Once = ONCE_INIT; static mut __pthread_get_minstack: Option = None; diff --git a/src/libstd/sys/windows/fs2.rs b/src/libstd/sys/windows/fs2.rs index 117f819eeeb..99835265111 100644 --- a/src/libstd/sys/windows/fs2.rs +++ b/src/libstd/sys/windows/fs2.rs @@ -372,7 +372,7 @@ pub fn readlink(p: &Path) -> io::Result { sz - 1, libc::VOLUME_NAME_DOS) }, |s| OsStringExt::from_wide(s))); - Ok(PathBuf::new(&ret)) + Ok(PathBuf::from(&ret)) } pub fn symlink(src: &Path, dst: &Path) -> io::Result<()> { diff --git a/src/libstd/sys/windows/mod.rs b/src/libstd/sys/windows/mod.rs index eeaf4ced072..b1ceac9b902 100644 --- a/src/libstd/sys/windows/mod.rs +++ b/src/libstd/sys/windows/mod.rs @@ -304,9 +304,7 @@ fn fill_utf16_buf_new(f1: F1, f2: F2) -> io::Result } fn os2path(s: &[u16]) -> PathBuf { - let os = ::from_wide(s); - // FIXME(#22751) should consume `os` - PathBuf::new(&os) + PathBuf::from(OsString::from_wide(s)) } pub fn truncate_utf16_at_nul<'a>(v: &'a [u16]) -> &'a [u16] { diff --git a/src/libstd/sys/windows/os.rs b/src/libstd/sys/windows/os.rs index 4f6c4c9aab3..83d06371734 100644 --- a/src/libstd/sys/windows/os.rs +++ b/src/libstd/sys/windows/os.rs @@ -363,10 +363,7 @@ pub fn temp_dir() -> PathBuf { pub fn home_dir() -> Option { getenv("HOME".as_os_str()).or_else(|| { getenv("USERPROFILE".as_os_str()) - }).map(|os| { - // FIXME(#22751) should consume `os` - PathBuf::new(&os) - }).or_else(|| unsafe { + }).map(PathBuf::from).or_else(|| unsafe { let me = c::GetCurrentProcess(); let mut token = ptr::null_mut(); if c::OpenProcessToken(me, c::TOKEN_READ, &mut token) == 0 { diff --git a/src/libstd/thread/scoped.rs b/src/libstd/thread/scoped.rs index d57535391fd..b384879d7a9 100644 --- a/src/libstd/thread/scoped.rs +++ 
b/src/libstd/thread/scoped.rs @@ -24,7 +24,7 @@ //! # Examples //! //! ``` -//! # #![feature(std_misc)] +//! # #![feature(scoped_tls)] //! scoped_thread_local!(static FOO: u32); //! //! // Initially each scoped slot is empty. @@ -147,7 +147,7 @@ impl ScopedKey { /// # Examples /// /// ``` - /// # #![feature(std_misc)] + /// # #![feature(scoped_tls)] /// scoped_thread_local!(static FOO: u32); /// /// FOO.set(&100, || { @@ -200,7 +200,7 @@ impl ScopedKey { /// # Examples /// /// ```no_run - /// # #![feature(std_misc)] + /// # #![feature(scoped_tls)] /// scoped_thread_local!(static FOO: u32); /// /// FOO.with(|slot| { diff --git a/src/test/run-make/issue-19371/foo.rs b/src/test/run-make/issue-19371/foo.rs index b089b9269a2..0d42e0be58d 100644 --- a/src/test/run-make/issue-19371/foo.rs +++ b/src/test/run-make/issue-19371/foo.rs @@ -8,7 +8,7 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#![feature(rustc_private, path)] +#![feature(rustc_private, path, convert)] extern crate rustc; extern crate rustc_driver; @@ -33,9 +33,9 @@ fn main() { panic!("expected rustc path"); } - let tmpdir = PathBuf::new(&args[1]); + let tmpdir = PathBuf::from(&args[1]); - let mut sysroot = PathBuf::new(&args[3]); + let mut sysroot = PathBuf::from(&args[3]); sysroot.pop(); sysroot.pop(); diff --git a/src/test/run-pass/create-dir-all-bare.rs b/src/test/run-pass/create-dir-all-bare.rs index 3a4286c2927..475df629f63 100644 --- a/src/test/run-pass/create-dir-all-bare.rs +++ b/src/test/run-pass/create-dir-all-bare.rs @@ -8,6 +8,8 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
+#![feature(tempdir)] + use std::env; use std::fs::{self, TempDir}; diff --git a/src/test/run-pass/issue-20797.rs b/src/test/run-pass/issue-20797.rs index 4dbe7c968a7..d0720ec593f 100644 --- a/src/test/run-pass/issue-20797.rs +++ b/src/test/run-pass/issue-20797.rs @@ -12,12 +12,12 @@ // pretty-expanded FIXME #23616 -#![feature(old_io, old_path)] +#![feature(convert)] use std::default::Default; use std::io; use std::fs; -use std::path::{PathBuf, Path}; +use std::path::PathBuf; pub trait PathExtensions { fn is_dir(&self) -> bool { false } @@ -98,8 +98,8 @@ impl Iterator for Subpaths { } } -fn foo() { - let mut walker: Subpaths = Subpaths::walk(&PathBuf::new("/home")).unwrap(); +fn _foo() { + let _walker: Subpaths = Subpaths::walk(&PathBuf::from("/home")).unwrap(); } fn main() {} diff --git a/src/test/run-pass/send_str_hashmap.rs b/src/test/run-pass/send_str_hashmap.rs index 7bef36d0656..d109f7abde4 100644 --- a/src/test/run-pass/send_str_hashmap.rs +++ b/src/test/run-pass/send_str_hashmap.rs @@ -10,7 +10,7 @@ // pretty-expanded FIXME #23616 -#![feature(collections)] +#![feature(collections, into_cow)] extern crate collections; diff --git a/src/test/run-pass/send_str_treemap.rs b/src/test/run-pass/send_str_treemap.rs index 04a4a239b0f..07dd5443348 100644 --- a/src/test/run-pass/send_str_treemap.rs +++ b/src/test/run-pass/send_str_treemap.rs @@ -10,7 +10,7 @@ // pretty-expanded FIXME #23616 -#![feature(collections)] +#![feature(collections, into_cow)] extern crate collections; diff --git a/src/test/run-pass/tcp-stress.rs b/src/test/run-pass/tcp-stress.rs index e06e6883a75..489abf163c0 100644 --- a/src/test/run-pass/tcp-stress.rs +++ b/src/test/run-pass/tcp-stress.rs @@ -13,6 +13,9 @@ // ignore-openbsd system ulimit (Too many open files) // exec-env:RUST_LOG=debug +#![feature(rustc_private, libc, old_io, io, std_misc)] +#![allow(deprecated, unused_must_use)] + #[macro_use] extern crate log; extern crate libc; diff --git a/src/test/run-pass/ufcs-polymorphic-paths.rs 
b/src/test/run-pass/ufcs-polymorphic-paths.rs index e05a60dbc7f..a6ea0f76dc2 100644 --- a/src/test/run-pass/ufcs-polymorphic-paths.rs +++ b/src/test/run-pass/ufcs-polymorphic-paths.rs @@ -10,7 +10,7 @@ // pretty-expanded FIXME #23616 -#![feature(collections, rand)] +#![feature(collections, rand, into_cow)] use std::borrow::{Cow, IntoCow}; use std::collections::BitVec; -- cgit 1.4.1-3-g733a5