| author | bors &lt;bors@rust-lang.org&gt; | 2015-03-24 17:38:09 +0000 |
|---|---|---|
| committer | bors &lt;bors@rust-lang.org&gt; | 2015-03-24 17:38:09 +0000 |
| commit | ed810385045ab0db90303574ba3ea47dfa2a36d5 | |
| tree | 161242c800aca625a26c56551fa5adb446c0089f | /src/liballoc |
| parent | 28a0b25f424090255966273994748a9f9901059f | |
| parent | d252d0ad5434bcf77076729ab766eeff98f20ead | |
Auto merge of #23654 - alexcrichton:rollup, r=alexcrichton
Diffstat (limited to 'src/liballoc')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | src/liballoc/arc.rs | 84 |
| -rw-r--r-- | src/liballoc/boxed.rs | 3 |
| -rw-r--r-- | src/liballoc/heap.rs | 3 |
| -rw-r--r-- | src/liballoc/lib.rs | 1 |
| -rw-r--r-- | src/liballoc/rc.rs | 102 |

5 files changed, 119 insertions(+), 74 deletions(-)
```diff
diff --git a/src/liballoc/arc.rs b/src/liballoc/arc.rs
index 8528be2860c..c9bbc0d74cd 100644
--- a/src/liballoc/arc.rs
+++ b/src/liballoc/arc.rs
@@ -95,6 +95,7 @@ use heap::deallocate;
 /// task.
 ///
 /// ```
+/// # #![feature(alloc, core)]
 /// use std::sync::Arc;
 /// use std::thread;
 ///
@@ -127,8 +128,8 @@ unsafe impl<T: Sync + Send> Sync for Arc<T> { }
 
 /// A weak pointer to an `Arc`.
 ///
-/// Weak pointers will not keep the data inside of the `Arc` alive, and can be used to break cycles
-/// between `Arc` pointers.
+/// Weak pointers will not keep the data inside of the `Arc` alive, and can be
+/// used to break cycles between `Arc` pointers.
 #[unsafe_no_drop_flag]
 #[unstable(feature = "alloc",
            reason = "Weak pointers may not belong in this module.")]
@@ -185,6 +186,7 @@ impl<T> Arc<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::sync::Arc;
     ///
     /// let five = Arc::new(5);
@@ -216,8 +218,8 @@ impl<T> Arc<T> {
     unsafe fn drop_slow(&mut self) {
         let ptr = *self._ptr;
 
-        // Destroy the data at this time, even though we may not free the box allocation itself
-        // (there may still be weak pointers lying around).
+        // Destroy the data at this time, even though we may not free the box
+        // allocation itself (there may still be weak pointers lying around).
         drop(ptr::read(&self.inner().data));
 
         if self.inner().weak.fetch_sub(1, Release) == 1 {
@@ -246,6 +248,7 @@ impl<T> Clone for Arc<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::sync::Arc;
     ///
     /// let five = Arc::new(5);
@@ -283,12 +286,13 @@ impl<T> Deref for Arc<T> {
 impl<T: Send + Sync + Clone> Arc<T> {
     /// Make a mutable reference from the given `Arc<T>`.
     ///
-    /// This is also referred to as a copy-on-write operation because the inner data is cloned if
-    /// the reference count is greater than one.
+    /// This is also referred to as a copy-on-write operation because the inner
+    /// data is cloned if the reference count is greater than one.
     ///
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::sync::Arc;
     ///
     /// let mut five = Arc::new(5);
@@ -298,16 +302,18 @@ impl<T: Send + Sync + Clone> Arc<T> {
     #[inline]
     #[unstable(feature = "alloc")]
     pub fn make_unique(&mut self) -> &mut T {
-        // Note that we hold a strong reference, which also counts as a weak reference, so we only
-        // clone if there is an additional reference of either kind.
+        // Note that we hold a strong reference, which also counts as a weak
+        // reference, so we only clone if there is an additional reference of
+        // either kind.
         if self.inner().strong.load(SeqCst) != 1 ||
            self.inner().weak.load(SeqCst) != 1 {
             *self = Arc::new((**self).clone())
         }
-        // This unsafety is ok because we're guaranteed that the pointer returned is the *only*
-        // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
-        // this point, and we required the Arc itself to be `mut`, so we're returning the only
-        // possible reference to the inner data.
+        // This unsafety is ok because we're guaranteed that the pointer
+        // returned is the *only* pointer that will ever be returned to T. Our
+        // reference count is guaranteed to be 1 at this point, and we required
+        // the Arc itself to be `mut`, so we're returning the only possible
+        // reference to the inner data.
        let inner = unsafe { &mut **self._ptr };
        &mut inner.data
    }
```
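The `make_unique` hunk is the copy-on-write pattern: the inner value is cloned only when some other strong or weak reference exists, so the returned `&mut T` is provably unique. This API was unstable in 2015 and was later stabilized as `Arc::make_mut`; the sketch below shows the same behavior on today's stable Rust, as an illustration of the pattern rather than the code in this commit.

```rust
use std::sync::Arc;

fn main() {
    let mut data = Arc::new(5);
    let other = Arc::clone(&data); // strong count is now 2

    // Another reference exists, so make_mut clones the inner value
    // before handing out `&mut i32`; `other` keeps seeing the old 5.
    *Arc::make_mut(&mut data) += 1;
    assert_eq!((*data, *other), (6, 5));

    // Now `data` is the only reference, so no clone happens:
    // the value is mutated in place.
    *Arc::make_mut(&mut data) += 1;
    assert_eq!(*data, 7);
}
```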
Continuing `src/liballoc/arc.rs`:

```diff
@@ -318,12 +324,14 @@ impl<T: Send + Sync + Clone> Arc<T> {
 impl<T: Sync + Send> Drop for Arc<T> {
     /// Drops the `Arc<T>`.
     ///
-    /// This will decrement the strong reference count. If the strong reference count becomes zero
-    /// and the only other references are `Weak<T>` ones, `drop`s the inner value.
+    /// This will decrement the strong reference count. If the strong reference
+    /// count becomes zero and the only other references are `Weak<T>` ones,
+    /// `drop`s the inner value.
     ///
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::sync::Arc;
     ///
     /// {
@@ -342,29 +350,32 @@ impl<T: Sync + Send> Drop for Arc<T> {
     /// ```
     #[inline]
     fn drop(&mut self) {
-        // This structure has #[unsafe_no_drop_flag], so this drop glue may run more than once (but
-        // it is guaranteed to be zeroed after the first if it's run more than once)
+        // This structure has #[unsafe_no_drop_flag], so this drop glue may run
+        // more than once (but it is guaranteed to be zeroed after the first if
+        // it's run more than once)
         let ptr = *self._ptr;
         if ptr.is_null() { return }
 
-        // Because `fetch_sub` is already atomic, we do not need to synchronize with other threads
-        // unless we are going to delete the object. This same logic applies to the below
-        // `fetch_sub` to the `weak` count.
+        // Because `fetch_sub` is already atomic, we do not need to synchronize
+        // with other threads unless we are going to delete the object. This
+        // same logic applies to the below `fetch_sub` to the `weak` count.
         if self.inner().strong.fetch_sub(1, Release) != 1 { return }
 
-        // This fence is needed to prevent reordering of use of the data and deletion of the data.
-        // Because it is marked `Release`, the decreasing of the reference count synchronizes with
-        // this `Acquire` fence. This means that use of the data happens before decreasing the
-        // reference count, which happens before this fence, which happens before the deletion of
-        // the data.
+        // This fence is needed to prevent reordering of use of the data and
+        // deletion of the data. Because it is marked `Release`, the decreasing
+        // of the reference count synchronizes with this `Acquire` fence. This
+        // means that use of the data happens before decreasing the reference
+        // count, which happens before this fence, which happens before the
+        // deletion of the data.
         //
         // As explained in the [Boost documentation][1],
         //
-        // > It is important to enforce any possible access to the object in one thread (through an
-        // > existing reference) to *happen before* deleting the object in a different thread. This
-        // > is achieved by a "release" operation after dropping a reference (any access to the
-        // > object through this reference must obviously happened before), and an "acquire"
-        // > operation before deleting the object.
+        // > It is important to enforce any possible access to the object in one
+        // > thread (through an existing reference) to *happen before* deleting
+        // > the object in a different thread. This is achieved by a "release"
+        // > operation after dropping a reference (any access to the object
+        // > through this reference must obviously happened before), and an
+        // > "acquire" operation before deleting the object.
         //
         // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
         atomic::fence(Acquire);
```
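The rewrapped comments in `Drop` describe the Boost-style counting protocol: decrement with `Release`, and run an `Acquire` fence only on the path that frees the data, so every use of the data through any reference happens-before its deletion. A minimal, self-contained sketch of that protocol on modern stable Rust (the `Count` type and `release` method are invented names for illustration):

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering::{Acquire, Release}};

// Hypothetical refcount demonstrating the ordering used by Arc::drop.
struct Count(AtomicUsize);

impl Count {
    /// Returns true if the caller is the one who must free the data.
    fn release(&self) -> bool {
        // Release: all of this thread's prior uses of the data
        // happen-before the decrement becomes visible to other threads.
        if self.0.fetch_sub(1, Release) != 1 {
            return false; // other references remain; nothing to free
        }
        // Acquire: pairs with every Release decrement above, so every
        // use through any reference happens-before the deletion below.
        fence(Acquire);
        true
    }
}

fn main() {
    let c = Count(AtomicUsize::new(2));
    assert!(!c.release()); // still one reference left
    assert!(c.release());  // last one out frees the data
}
```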
Continuing `src/liballoc/arc.rs`:

```diff
@@ -382,11 +393,13 @@ impl<T: Sync + Send> Weak<T> {
     ///
     /// Upgrades the `Weak<T>` reference to an `Arc<T>`, if possible.
     ///
-    /// Returns `None` if there were no strong references and the data was destroyed.
+    /// Returns `None` if there were no strong references and the data was
+    /// destroyed.
     ///
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::sync::Arc;
     ///
     /// let five = Arc::new(5);
@@ -396,8 +409,8 @@ impl<T: Sync + Send> Weak<T> {
     /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
     /// ```
     pub fn upgrade(&self) -> Option<Arc<T>> {
-        // We use a CAS loop to increment the strong count instead of a fetch_add because once the
-        // count hits 0 is must never be above 0.
+        // We use a CAS loop to increment the strong count instead of a
+        // fetch_add because once the count hits 0 is must never be above 0.
         let inner = self.inner();
         loop {
             let n = inner.strong.load(SeqCst);
@@ -424,6 +437,7 @@ impl<T: Sync + Send> Clone for Weak<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::sync::Arc;
     ///
     /// let weak_five = Arc::new(5).downgrade();
@@ -448,6 +462,7 @@ impl<T: Sync + Send> Drop for Weak<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::sync::Arc;
     ///
     /// {
@@ -472,8 +487,9 @@ impl<T: Sync + Send> Drop for Weak<T> {
         // see comments above for why this check is here
         if ptr.is_null() { return }
 
-        // If we find out that we were the last weak pointer, then its time to deallocate the data
-        // entirely. See the discussion in Arc::drop() about the memory orderings
+        // If we find out that we were the last weak pointer, then its time to
+        // deallocate the data entirely. See the discussion in Arc::drop() about
+        // the memory orderings
         if self.inner().weak.fetch_sub(1, Release) == 1 {
             atomic::fence(Acquire);
             unsafe { deallocate(ptr as *mut u8, size_of::<ArcInner<T>>(),
```
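`upgrade` relies on a compare-and-swap loop because the strong count, once it reaches 0, must never become nonzero again; an unconditional `fetch_add` could resurrect a value whose destruction has already begun. The 2015 code spells this loop with the then-current `compare_and_swap`; a sketch of the same rule using today's `compare_exchange_weak` (the `try_upgrade` name is invented):

```rust
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};

// Take a new strong reference only if the count is still nonzero.
fn try_upgrade(strong: &AtomicUsize) -> bool {
    let mut n = strong.load(SeqCst);
    loop {
        if n == 0 {
            return false; // value already destroyed: upgrade() -> None
        }
        match strong.compare_exchange_weak(n, n + 1, SeqCst, SeqCst) {
            Ok(_) => return true,      // count went n -> n + 1, n nonzero
            Err(actual) => n = actual, // lost a race; retry with new value
        }
    }
}

fn main() {
    let live = AtomicUsize::new(1);
    assert!(try_upgrade(&live)); // 1 -> 2

    let dead = AtomicUsize::new(0);
    assert!(!try_upgrade(&dead)); // never revives a dead count
}
```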
```diff
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index 6bdfe2b1551..8b18fbf554a 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -65,6 +65,7 @@ use core::raw::TraitObject;
 /// The following two examples are equivalent:
 ///
 /// ```
+/// # #![feature(alloc)]
 /// #![feature(box_syntax)]
 /// use std::boxed::HEAP;
 ///
@@ -135,6 +136,7 @@ impl<T : ?Sized> Box<T> {
 ///
 /// # Examples
 /// ```
+/// # #![feature(alloc)]
 /// use std::boxed;
 ///
 /// let seventeen = Box::new(17u32);
@@ -178,6 +180,7 @@ impl<T: Clone> Clone for Box<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc, core)]
     /// let x = Box::new(5);
     /// let mut y = Box::new(10);
     ///
diff --git a/src/liballoc/heap.rs b/src/liballoc/heap.rs
index aaf6e76237c..3733350412e 100644
--- a/src/liballoc/heap.rs
+++ b/src/liballoc/heap.rs
@@ -26,6 +26,9 @@ pub unsafe fn allocate(size: usize, align: usize) -> *mut u8 {
 ///
 /// On failure, return a null pointer and leave the original allocation intact.
 ///
+/// If the allocation was relocated, the memory at the passed-in pointer is
+/// undefined after the call.
+///
 /// Behavior is undefined if the requested size is 0 or the alignment is not a
 /// power of 2. The alignment must be no larger than the largest supported page
 /// size on the platform.
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 34c0686fe37..541de2d37fb 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -66,6 +66,7 @@
 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
        html_favicon_url = "http://www.rust-lang.org/favicon.ico",
        html_root_url = "http://doc.rust-lang.org/nightly/")]
+#![doc(test(no_crate_inject))]
 
 #![feature(no_std)]
 #![no_std]
```
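The new paragraph in `heap.rs` pins down the reallocation contract: on failure the function returns null and the original allocation is untouched; on success the block may have been relocated, so the old pointer must not be used again. The unstable `heap::reallocate` is long gone, but `std::alloc::realloc` carries the same contract; a sketch of careful usage under that assumption:

```rust
use std::alloc::{alloc, dealloc, realloc, Layout};

fn main() {
    unsafe {
        let old_layout = Layout::array::<u8>(16).unwrap();
        let p = alloc(old_layout);
        assert!(!p.is_null(), "allocation failed");

        // On failure realloc returns null and `p` is still a valid,
        // intact allocation; on success the block may have moved, so
        // `p` must not be touched again, only `q`.
        let q = realloc(p, old_layout, 32);
        if q.is_null() {
            dealloc(p, old_layout); // grow failed; free the original
        } else {
            let new_layout = Layout::from_size_align(32, old_layout.align()).unwrap();
            dealloc(q, new_layout);
        }
    }
}
```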
```diff
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 855235e89c8..eb3c5c16726 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -32,6 +32,7 @@
 //! and have the `Owner` remain allocated as long as any `Gadget` points at it.
 //!
 //! ```rust
+//! # #![feature(alloc, collections)]
 //! use std::rc::Rc;
 //!
 //! struct Owner {
@@ -58,12 +59,12 @@
 //!
 //!     drop(gadget_owner);
 //!
-//!     // Despite dropping gadget_owner, we're still able to print out the name of
-//!     // the Owner of the Gadgets. This is because we've only dropped the
+//!     // Despite dropping gadget_owner, we're still able to print out the name
+//!     // of the Owner of the Gadgets. This is because we've only dropped the
 //!     // reference count object, not the Owner it wraps. As long as there are
-//!     // other `Rc<T>` objects pointing at the same Owner, it will remain allocated. Notice
-//!     // that the `Rc<T>` wrapper around Gadget.owner gets automatically dereferenced
-//!     // for us.
+//!     // other `Rc<T>` objects pointing at the same Owner, it will remain
+//!     // allocated. Notice that the `Rc<T>` wrapper around Gadget.owner gets
+//!     // automatically dereferenced for us.
 //!     println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name);
 //!     println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name);
 //!
@@ -73,21 +74,25 @@
 //! }
 //! ```
 //!
-//! If our requirements change, and we also need to be able to traverse from Owner → Gadget, we
-//! will run into problems: an `Rc<T>` pointer from Owner → Gadget introduces a cycle between the
-//! objects. This means that their reference counts can never reach 0, and the objects will remain
-//! allocated: a memory leak. In order to get around this, we can use `Weak<T>` pointers. These
-//! pointers don't contribute to the total count.
+//! If our requirements change, and we also need to be able to traverse from
+//! Owner → Gadget, we will run into problems: an `Rc<T>` pointer from Owner
+//! → Gadget introduces a cycle between the objects. This means that their
+//! reference counts can never reach 0, and the objects will remain allocated: a
+//! memory leak. In order to get around this, we can use `Weak<T>` pointers.
+//! These pointers don't contribute to the total count.
 //!
-//! Rust actually makes it somewhat difficult to produce this loop in the first place: in order to
-//! end up with two objects that point at each other, one of them needs to be mutable. This is
-//! problematic because `Rc<T>` enforces memory safety by only giving out shared references to the
-//! object it wraps, and these don't allow direct mutation. We need to wrap the part of the object
-//! we wish to mutate in a `RefCell`, which provides *interior mutability*: a method to achieve
-//! mutability through a shared reference. `RefCell` enforces Rust's borrowing rules at runtime.
-//! Read the `Cell` documentation for more details on interior mutability.
+//! Rust actually makes it somewhat difficult to produce this loop in the first
+//! place: in order to end up with two objects that point at each other, one of
+//! them needs to be mutable. This is problematic because `Rc<T>` enforces
+//! memory safety by only giving out shared references to the object it wraps,
+//! and these don't allow direct mutation. We need to wrap the part of the
+//! object we wish to mutate in a `RefCell`, which provides *interior
+//! mutability*: a method to achieve mutability through a shared reference.
+//! `RefCell` enforces Rust's borrowing rules at runtime. Read the `Cell`
+//! documentation for more details on interior mutability.
 //!
 //! ```rust
+//! # #![feature(alloc)]
 //! use std::rc::Rc;
 //! use std::rc::Weak;
 //! use std::cell::RefCell;
@@ -128,9 +133,10 @@
 //!     for gadget_opt in gadget_owner.gadgets.borrow().iter() {
 //!
 //!         // gadget_opt is a Weak<Gadget>. Since weak pointers can't guarantee
-//!         // that their object is still allocated, we need to call upgrade() on them
-//!         // to turn them into a strong reference. This returns an Option, which
-//!         // contains a reference to our object if it still exists.
+//!         // that their object is still allocated, we need to call upgrade()
+//!         // on them to turn them into a strong reference. This returns an
+//!         // Option, which contains a reference to our object if it still
+//!         // exists.
 //!         let gadget = gadget_opt.upgrade().unwrap();
 //!         println!("Gadget {} owned by {}", gadget.id, gadget.owner.name);
 //!     }
```
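These module docs build up an Owner/Gadget graph where back-edges are `Weak<T>`, so Owner → Gadget → Owner never forms a strong cycle, and `RefCell` supplies the interior mutability needed to wire the objects together after creation. A condensed version of that example on modern stable Rust, where `downgrade` is the associated function `Rc::downgrade`:

```rust
use std::cell::RefCell;
use std::rc::{Rc, Weak};

struct Owner {
    name: String,
    gadgets: RefCell<Vec<Weak<Gadget>>>, // weak back-edges: no ownership
}

struct Gadget {
    id: i32,
    owner: Rc<Owner>, // strong edge: a gadget keeps its owner alive
}

fn main() {
    let owner = Rc::new(Owner {
        name: "Gadget Man".to_string(),
        gadgets: RefCell::new(Vec::new()),
    });
    let gadget = Rc::new(Gadget { id: 1, owner: owner.clone() });

    // Owner -> Gadget is weak, so Owner -> Gadget -> Owner is not a
    // strong cycle and both allocations are freed at the end of main.
    owner.gadgets.borrow_mut().push(Rc::downgrade(&gadget));

    for g in owner.gadgets.borrow().iter() {
        // upgrade() checks that the gadget is still allocated.
        let g = g.upgrade().unwrap();
        println!("Gadget {} owned by {}", g.id, g.owner.name);
    }
}
```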
Continuing `src/liballoc/rc.rs`:

```diff
@@ -178,8 +184,8 @@ struct RcBox<T> {
 
 #[unsafe_no_drop_flag]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct Rc<T> {
-    // FIXME #12808: strange names to try to avoid interfering with field accesses of the contained
-    // type via Deref
+    // FIXME #12808: strange names to try to avoid interfering with field
+    // accesses of the contained type via Deref
     _ptr: NonZero<*mut RcBox<T>>,
 }
@@ -201,9 +207,10 @@ impl<T> Rc<T> {
     pub fn new(value: T) -> Rc<T> {
         unsafe {
             Rc {
-                // there is an implicit weak pointer owned by all the strong pointers, which
-                // ensures that the weak destructor never frees the allocation while the strong
-                // destructor is running, even if the weak pointer is stored inside the strong one.
+                // there is an implicit weak pointer owned by all the strong
+                // pointers, which ensures that the weak destructor never frees
+                // the allocation while the strong destructor is running, even
+                // if the weak pointer is stored inside the strong one.
                 _ptr: NonZero::new(boxed::into_raw(box RcBox {
                     value: value,
                     strong: Cell::new(1),
@@ -218,6 +225,7 @@ impl<T> Rc<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::rc::Rc;
     ///
     /// let five = Rc::new(5);
@@ -242,11 +250,13 @@ pub fn weak_count<T>(this: &Rc<T>) -> usize { this.weak() - 1 }
 #[unstable(feature = "alloc")]
 pub fn strong_count<T>(this: &Rc<T>) -> usize { this.strong() }
 
-/// Returns true if there are no other `Rc` or `Weak<T>` values that share the same inner value.
+/// Returns true if there are no other `Rc` or `Weak<T>` values that share the
+/// same inner value.
 ///
 /// # Examples
 ///
 /// ```
+/// # #![feature(alloc)]
 /// use std::rc;
 /// use std::rc::Rc;
 ///
@@ -267,6 +277,7 @@ pub fn is_unique<T>(rc: &Rc<T>) -> bool {
 /// # Examples
 ///
 /// ```
+/// # #![feature(alloc)]
 /// use std::rc::{self, Rc};
 ///
 /// let x = Rc::new(3);
@@ -301,6 +312,7 @@ pub fn try_unwrap<T>(rc: Rc<T>) -> Result<T, Rc<T>> {
 /// # Examples
 ///
 /// ```
+/// # #![feature(alloc)]
 /// use std::rc::{self, Rc};
 ///
 /// let mut x = Rc::new(3);
@@ -324,12 +336,13 @@ pub fn get_mut<'a, T>(rc: &'a mut Rc<T>) -> Option<&'a mut T> {
 impl<T: Clone> Rc<T> {
     /// Make a mutable reference from the given `Rc<T>`.
     ///
-    /// This is also referred to as a copy-on-write operation because the inner data is cloned if
-    /// the reference count is greater than one.
+    /// This is also referred to as a copy-on-write operation because the inner
+    /// data is cloned if the reference count is greater than one.
     ///
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::rc::Rc;
     ///
     /// let mut five = Rc::new(5);
@@ -342,10 +355,11 @@ impl<T: Clone> Rc<T> {
         if !is_unique(self) {
             *self = Rc::new((**self).clone())
         }
-        // This unsafety is ok because we're guaranteed that the pointer returned is the *only*
-        // pointer that will ever be returned to T. Our reference count is guaranteed to be 1 at
-        // this point, and we required the `Rc<T>` itself to be `mut`, so we're returning the only
-        // possible reference to the inner value.
+        // This unsafety is ok because we're guaranteed that the pointer
+        // returned is the *only* pointer that will ever be returned to T. Our
+        // reference count is guaranteed to be 1 at this point, and we required
+        // the `Rc<T>` itself to be `mut`, so we're returning the only possible
+        // reference to the inner value.
         let inner = unsafe { &mut **self._ptr };
         &mut inner.value
     }
@@ -366,12 +380,14 @@ impl<T> Deref for Rc<T> {
 impl<T> Drop for Rc<T> {
     /// Drops the `Rc<T>`.
     ///
-    /// This will decrement the strong reference count. If the strong reference count becomes zero
-    /// and the only other references are `Weak<T>` ones, `drop`s the inner value.
+    /// This will decrement the strong reference count. If the strong reference
+    /// count becomes zero and the only other references are `Weak<T>` ones,
+    /// `drop`s the inner value.
     ///
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::rc::Rc;
     ///
     /// {
@@ -396,8 +412,8 @@ impl<T> Drop for Rc<T> {
                 if self.strong() == 0 {
                     ptr::read(&**self); // destroy the contained object
 
-                    // remove the implicit "strong weak" pointer now that we've destroyed the
-                    // contents.
+                    // remove the implicit "strong weak" pointer now that we've
+                    // destroyed the contents.
                     self.dec_weak();
 
                     if self.weak() == 0 {
@@ -420,6 +436,7 @@ impl<T> Clone for Rc<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::rc::Rc;
     ///
     /// let five = Rc::new(5);
@@ -618,7 +635,8 @@ impl<T: fmt::Debug> fmt::Debug for Rc<T> {
 
 /// A weak version of `Rc<T>`.
 ///
-/// Weak references do not count when determining if the inner value should be dropped.
+/// Weak references do not count when determining if the inner value should be
+/// dropped.
 ///
 /// See the [module level documentation](./index.html) for more.
 #[unsafe_no_drop_flag]
@@ -643,11 +661,13 @@ impl<T> Weak<T> {
     ///
     /// Upgrades the `Weak<T>` reference to an `Rc<T>`, if possible.
     ///
-    /// Returns `None` if there were no strong references and the data was destroyed.
+    /// Returns `None` if there were no strong references and the data was
+    /// destroyed.
     ///
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::rc::Rc;
     ///
     /// let five = Rc::new(5);
@@ -676,6 +696,7 @@ impl<T> Drop for Weak<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::rc::Rc;
     ///
     /// {
@@ -699,8 +720,8 @@ impl<T> Drop for Weak<T> {
             let ptr = *self._ptr;
             if !ptr.is_null() {
                 self.dec_weak();
-                // the weak count starts at 1, and will only go to zero if all the strong pointers
-                // have disappeared.
+                // the weak count starts at 1, and will only go to zero if all
+                // the strong pointers have disappeared.
                 if self.weak() == 0 {
                     deallocate(ptr as *mut u8, size_of::<RcBox<T>>(),
                                min_align_of::<RcBox<T>>())
@@ -721,6 +742,7 @@ impl<T> Clone for Weak<T> {
     /// # Examples
     ///
     /// ```
+    /// # #![feature(alloc)]
     /// use std::rc::Rc;
     ///
     /// let weak_five = Rc::new(5).downgrade();
```
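The comment in `Rc::new` states the key invariant of the whole file: the strong pointers collectively own one implicit weak pointer, so the allocation outlives the strong destructor even when a `Weak` is stored inside the value being dropped, and `weak_count` subtracts that implicit weak before reporting. The observable consequences on modern stable Rust, where `weak_count` likewise reports only explicit weaks and `upgrade` fails once the value is gone:

```rust
use std::rc::Rc;

fn main() {
    let five = Rc::new(5);
    // `.downgrade()` in this 2015 API is `Rc::downgrade(&five)` today.
    let weak_five = Rc::downgrade(&five);
    assert_eq!(Rc::strong_count(&five), 1);
    assert_eq!(Rc::weak_count(&five), 1); // the implicit weak is not counted

    assert_eq!(weak_five.upgrade().as_deref(), Some(&5));
    drop(five); // last strong reference: the 5 is destroyed
    assert!(weak_five.upgrade().is_none());
}
```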
