author    bors <bors@rust-lang.org>  2019-07-13 06:49:02 +0000
committer bors <bors@rust-lang.org>  2019-07-13 06:49:02 +0000
commit    4a95e9704de0eeaecba55df102c1129e79a3a929 (patch)
tree      363bc1700a2826f38735027195160fa84ecefc2c /src/liballoc
parent    a9c7febb879689a3d24e3ba34531026930313c4c (diff)
parent    85def307fc83f8c0d164b1506bb855dfaed5f8b5 (diff)
Auto merge of #61953 - Centril:shared-from-iter, r=RalfJung
Add `impl<T> FromIterator<T> for Arc/Rc<[T]>`

Add implementations of `FromIterator<T> for Arc/Rc<[T]>` with symmetrical logic.

This also takes advantage of specialization for iterators with a known exact length (`TrustedLen`): when the size is known up front, the space for the `Rc<[T]>` is allocated directly, eliding the intermediate `Vec<T>` and the final copy out of it. This is the primary motivation, and why collecting directly is preferable to `iter.collect::<Vec<_>>().into(): Rc<[T]>`.

Moreover, this PR refactors `rc.rs` and `sync.rs` in a few places, adding shared `from_inner`/`from_ptr` constructors and a common `allocate_for_unsized` helper.

r? @RalfJung for the code
cc @alexcrichton from T-libs
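
For a quick feel of the new impls, here is a minimal usage sketch (mirroring the doc examples added below; it assumes a toolchain in which this merge has landed):

```rust
use std::rc::Rc;
use std::sync::Arc;

fn main() {
    // General case: `Filter` has no trusted length, so this first
    // collects into a `Vec<u8>` and then converts it (two steps).
    let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
    assert_eq!(&*evens, &[0, 2, 4, 6, 8]);

    // `TrustedLen` case: a `Range` knows its exact length, so the
    // `Arc<[u16]>` storage is allocated directly, in one allocation.
    let nums: Arc<[u16]> = (0..100).collect();
    assert_eq!(nums.len(), 100);
}
```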
Diffstat (limited to 'src/liballoc')
-rw-r--r--  src/liballoc/lib.rs          1
-rw-r--r--  src/liballoc/rc.rs         287
-rw-r--r--  src/liballoc/sync.rs       264
-rw-r--r--  src/liballoc/tests/arc.rs  121
-rw-r--r--  src/liballoc/tests/lib.rs    2
-rw-r--r--  src/liballoc/tests/rc.rs   117
6 files changed, 652 insertions(+), 140 deletions(-)
diff --git a/src/liballoc/lib.rs b/src/liballoc/lib.rs
index 0750665c6b4..2e48825e81c 100644
--- a/src/liballoc/lib.rs
+++ b/src/liballoc/lib.rs
@@ -93,6 +93,7 @@
 #![feature(ptr_offset_from)]
 #![feature(rustc_attrs)]
 #![feature(receiver_trait)]
+#![feature(slice_from_raw_parts)]
 #![feature(specialization)]
 #![feature(staged_api)]
 #![feature(std_internals)]
diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs
index 45a8b4a25c8..36d54656795 100644
--- a/src/liballoc/rc.rs
+++ b/src/liballoc/rc.rs
@@ -238,12 +238,13 @@ use core::cmp::Ordering;
 use core::fmt;
 use core::hash::{Hash, Hasher};
 use core::intrinsics::abort;
+use core::iter;
 use core::marker::{self, Unpin, Unsize, PhantomData};
 use core::mem::{self, align_of, align_of_val, forget, size_of_val};
 use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn};
 use core::pin::Pin;
 use core::ptr::{self, NonNull};
-use core::slice::from_raw_parts_mut;
+use core::slice::{self, from_raw_parts_mut};
 use core::convert::From;
 use core::usize;
 
@@ -286,6 +287,19 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
 #[unstable(feature = "dispatch_from_dyn", issue = "0")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T> {}
 
+impl<T: ?Sized> Rc<T> {
+    fn from_inner(ptr: NonNull<RcBox<T>>) -> Self {
+        Self {
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+
+    unsafe fn from_ptr(ptr: *mut RcBox<T>) -> Self {
+        Self::from_inner(NonNull::new_unchecked(ptr))
+    }
+}
+
 impl<T> Rc<T> {
     /// Constructs a new `Rc<T>`.
     ///
@@ -298,18 +312,15 @@ impl<T> Rc<T> {
     /// ```
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn new(value: T) -> Rc<T> {
-        Rc {
-            // there is an implicit weak pointer owned by all the strong
-            // pointers, which ensures that the weak destructor never frees
-            // the allocation while the strong destructor is running, even
-            // if the weak pointer is stored inside the strong one.
-            ptr: Box::into_raw_non_null(box RcBox {
-                strong: Cell::new(1),
-                weak: Cell::new(1),
-                value,
-            }),
-            phantom: PhantomData,
-        }
+        // There is an implicit weak pointer owned by all the strong
+        // pointers, which ensures that the weak destructor never frees
+        // the allocation while the strong destructor is running, even
+        // if the weak pointer is stored inside the strong one.
+        Self::from_inner(Box::into_raw_non_null(box RcBox {
+            strong: Cell::new(1),
+            weak: Cell::new(1),
+            value,
+        }))
     }
 
     /// Constructs a new `Pin<Rc<T>>`. If `T` does not implement `Unpin`, then
@@ -422,10 +433,7 @@ impl<T: ?Sized> Rc<T> {
         let fake_ptr = ptr as *mut RcBox<T>;
         let rc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
 
-        Rc {
-            ptr: NonNull::new_unchecked(rc_ptr),
-            phantom: PhantomData,
-        }
+        Self::from_ptr(rc_ptr)
     }
 
     /// Consumes the `Rc`, returning the wrapped pointer as `NonNull<T>`.
@@ -683,7 +691,7 @@ impl Rc<dyn Any> {
         if (*self).is::<T>() {
             let ptr = self.ptr.cast::<RcBox<T>>();
             forget(self);
-            Ok(Rc { ptr, phantom: PhantomData })
+            Ok(Rc::from_inner(ptr))
         } else {
             Err(self)
         }
@@ -691,21 +699,29 @@ impl Rc<dyn Any> {
 }
 
 impl<T: ?Sized> Rc<T> {
-    // Allocates an `RcBox<T>` with sufficient space for an unsized value
-    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
-        // Calculate layout using the given value.
+    /// Allocates an `RcBox<T>` with sufficient space for
+    /// an unsized value where the value has the layout provided.
+    ///
+    /// The function `mem_to_rcbox` is called with the data pointer
+    /// and must return a (potentially fat) pointer for the `RcBox<T>`.
+    unsafe fn allocate_for_unsized(
+        value_layout: Layout,
+        mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>
+    ) -> *mut RcBox<T> {
+        // Calculate layout using the given value layout.
         // Previously, layout was calculated on the expression
         // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
         // reference (see #54908).
         let layout = Layout::new::<RcBox<()>>()
-            .extend(Layout::for_value(&*ptr)).unwrap().0
+            .extend(value_layout).unwrap().0
             .pad_to_align().unwrap();
 
+        // Allocate for the layout.
         let mem = Global.alloc(layout)
             .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the RcBox
-        let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut RcBox<T>;
+        let inner = mem_to_rcbox(mem.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, Cell::new(1));
@@ -714,6 +730,15 @@ impl<T: ?Sized> Rc<T> {
         inner
     }
 
+    /// Allocates an `RcBox<T>` with sufficient space for an unsized value
+    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
+        // Allocate for the `RcBox<T>` using the given value.
+        Self::allocate_for_unsized(
+            Layout::for_value(&*ptr),
+            |mem| set_data_ptr(ptr as *mut T, mem) as *mut RcBox<T>,
+        )
+    }
+
     fn from_box(v: Box<T>) -> Rc<T> {
         unsafe {
             let box_unique = Box::into_unique(v);
@@ -731,44 +756,49 @@ impl<T: ?Sized> Rc<T> {
             // Free the allocation without dropping its contents
             box_free(box_unique);
 
-            Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+            Self::from_ptr(ptr)
         }
     }
 }
 
-// Sets the data pointer of a `?Sized` raw pointer.
-//
-// For a slice/trait object, this sets the `data` field and leaves the rest
-// unchanged. For a sized raw pointer, this simply sets the pointer.
+impl<T> Rc<[T]> {
+    /// Allocates an `RcBox<[T]>` with the given length.
+    unsafe fn allocate_for_slice(len: usize) -> *mut RcBox<[T]> {
+        Self::allocate_for_unsized(
+            Layout::array::<T>(len).unwrap(),
+            |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>,
+        )
+    }
+}
+
+/// Sets the data pointer of a `?Sized` raw pointer.
+///
+/// For a slice/trait object, this sets the `data` field and leaves the rest
+/// unchanged. For a sized raw pointer, this simply sets the pointer.
 unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
     ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
     ptr
 }
 
 impl<T> Rc<[T]> {
-    // Copy elements from slice into newly allocated Rc<[T]>
-    //
-    // Unsafe because the caller must either take ownership or bind `T: Copy`
+    /// Copies elements from the slice into a newly allocated `Rc<[T]>`.
+    ///
+    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
     unsafe fn copy_from_slice(v: &[T]) -> Rc<[T]> {
-        let v_ptr = v as *const [T];
-        let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(v.len());
 
         ptr::copy_nonoverlapping(
             v.as_ptr(),
             &mut (*ptr).value as *mut [T] as *mut T,
             v.len());
 
-        Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+        Self::from_ptr(ptr)
     }
-}
 
-trait RcFromSlice<T> {
-    fn from_slice(slice: &[T]) -> Self;
-}
-
-impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
-    #[inline]
-    default fn from_slice(v: &[T]) -> Self {
+    /// Constructs an `Rc<[T]>` from an iterator known to be of a certain size.
+    ///
+    /// Behavior is undefined should the size be wrong.
+    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Rc<[T]> {
         // Panic guard while cloning T elements.
         // In the event of a panic, elements that have been written
         // into the new RcBox will be dropped, then the memory freed.
@@ -790,32 +820,43 @@ impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
             }
         }
 
-        unsafe {
-            let v_ptr = v as *const [T];
-            let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(len);
 
-            let mem = ptr as *mut _ as *mut u8;
-            let layout = Layout::for_value(&*ptr);
+        let mem = ptr as *mut _ as *mut u8;
+        let layout = Layout::for_value(&*ptr);
 
-            // Pointer to first element
-            let elems = &mut (*ptr).value as *mut [T] as *mut T;
+        // Pointer to first element
+        let elems = &mut (*ptr).value as *mut [T] as *mut T;
 
-            let mut guard = Guard{
-                mem: NonNull::new_unchecked(mem),
-                elems: elems,
-                layout: layout,
-                n_elems: 0,
-            };
+        let mut guard = Guard {
+            mem: NonNull::new_unchecked(mem),
+            elems,
+            layout,
+            n_elems: 0,
+        };
 
-            for (i, item) in v.iter().enumerate() {
-                ptr::write(elems.add(i), item.clone());
-                guard.n_elems += 1;
-            }
+        for (i, item) in iter.enumerate() {
+            ptr::write(elems.add(i), item);
+            guard.n_elems += 1;
+        }
+
+        // All clear. Forget the guard so it doesn't free the new RcBox.
+        forget(guard);
+
+        Self::from_ptr(ptr)
+    }
+}
 
-            // All clear. Forget the guard so it doesn't free the new RcBox.
-            forget(guard);
+/// Specialization trait used for `From<&[T]>`.
+trait RcFromSlice<T> {
+    fn from_slice(slice: &[T]) -> Self;
+}
 
-            Rc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
+    #[inline]
+    default fn from_slice(v: &[T]) -> Self {
+        unsafe {
+            Self::from_iter_exact(v.iter().cloned(), v.len())
         }
     }
 }
@@ -907,7 +948,7 @@ impl<T: ?Sized> Clone for Rc<T> {
     #[inline]
     fn clone(&self) -> Rc<T> {
         self.inc_strong();
-        Rc { ptr: self.ptr, phantom: PhantomData }
+        Self::from_inner(self.ptr)
     }
 }
 
@@ -1213,6 +1254,98 @@ impl<T> From<Vec<T>> for Rc<[T]> {
     }
 }
 
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Rc<[T]> {
+    /// Takes each element in the `Iterator` and collects it into an `Rc<[T]>`.
+    ///
+    /// # Performance characteristics
+    ///
+    /// ## The general case
+    ///
+    /// In the general case, collecting into `Rc<[T]>` is done by first
+    /// collecting into a `Vec<T>`. That is, when writing the following:
+    ///
+    /// ```rust
+    /// # use std::rc::Rc;
+    /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// this behaves as if we wrote:
+    ///
+    /// ```rust
+    /// # use std::rc::Rc;
+    /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
+    ///     .into(); // A second allocation for `Rc<[T]>` happens here.
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// This will allocate as many times as needed for constructing the `Vec<T>`
+    /// and then it will allocate once for turning the `Vec<T>` into the `Rc<[T]>`.
+    ///
+    /// ## Iterators of known length
+    ///
+    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+    /// a single allocation will be made for the `Rc<[T]>`. For example:
+    ///
+    /// ```rust
+    /// # use std::rc::Rc;
+    /// let evens: Rc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+    /// ```
+    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+        RcFromIter::from_iter(iter.into_iter())
+    }
+}
+
+/// Specialization trait used for collecting into `Rc<[T]>`.
+trait RcFromIter<T, I> {
+    fn from_iter(iter: I) -> Self;
+}
+
+impl<T, I: Iterator<Item = T>> RcFromIter<T, I> for Rc<[T]> {
+    default fn from_iter(iter: I) -> Self {
+        iter.collect::<Vec<T>>().into()
+    }
+}
+
+impl<T, I: iter::TrustedLen<Item = T>> RcFromIter<T, I> for Rc<[T]>  {
+    default fn from_iter(iter: I) -> Self {
+        // This is the case for a `TrustedLen` iterator.
+        let (low, high) = iter.size_hint();
+        if let Some(high) = high {
+            debug_assert_eq!(
+                low, high,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
+
+            unsafe {
+                // SAFETY: We need an exact element count, and `TrustedLen` guarantees that `low` is exact.
+                Rc::from_iter_exact(iter, low)
+            }
+        } else {
+            // Fall back to normal implementation.
+            iter.collect::<Vec<T>>().into()
+        }
+    }
+}
+
+impl<'a, T: 'a + Clone> RcFromIter<&'a T, slice::Iter<'a, T>> for Rc<[T]> {
+    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
+        // Delegate to `impl<T: Clone> From<&[T]> for Rc<[T]>`.
+        //
+        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
+        // which is even more performant.
+        //
+        // In the fall-back case we have `T: Clone`. This is still better
+        // than the `TrustedLen` implementation as slices have a known length
+        // and so we get to avoid calling `size_hint` and avoid the branching.
+        iter.as_slice().into()
+    }
+}
+
 /// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
 /// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Rc`]`<T>>`.
@@ -1456,7 +1589,7 @@ impl<T: ?Sized> Weak<T> {
             None
         } else {
             inner.inc_strong();
-            Some(Rc { ptr: self.ptr, phantom: PhantomData })
+            Some(Rc::from_inner(self.ptr))
         }
     }
 
@@ -1660,14 +1793,16 @@ trait RcBoxPtr<T: ?Sized> {
 
     #[inline]
     fn inc_strong(&self) {
+        let strong = self.strong();
+
         // We want to abort on overflow instead of dropping the value.
         // The reference count will never be zero when this is called;
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
-        if self.strong() == 0 || self.strong() == usize::max_value() {
+        if strong == 0 || strong == usize::max_value() {
             unsafe { abort(); }
         }
-        self.inner().strong.set(self.strong() + 1);
+        self.inner().strong.set(strong + 1);
     }
 
     #[inline]
@@ -1682,14 +1817,16 @@ trait RcBoxPtr<T: ?Sized> {
 
     #[inline]
     fn inc_weak(&self) {
+        let weak = self.weak();
+
         // We want to abort on overflow instead of dropping the value.
         // The reference count will never be zero when this is called;
         // nevertheless, we insert an abort here to hint LLVM at
         // an otherwise missed optimization.
-        if self.weak() == 0 || self.weak() == usize::max_value() {
+        if weak == 0 || weak == usize::max_value() {
             unsafe { abort(); }
         }
-        self.inner().weak.set(self.weak() + 1);
+        self.inner().weak.set(weak + 1);
     }
 
     #[inline]
@@ -2162,18 +2299,20 @@ impl<T: ?Sized> AsRef<T> for Rc<T> {
 impl<T: ?Sized> Unpin for Rc<T> { }
 
 unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
-    // Align the unsized value to the end of the RcBox.
+    // Align the unsized value to the end of the `RcBox`.
     // Because it is ?Sized, it will always be the last field in memory.
-    let align = align_of_val(&*ptr);
-    let layout = Layout::new::<RcBox<()>>();
-    (layout.size() + layout.padding_needed_for(align)) as isize
+    data_offset_align(align_of_val(&*ptr))
 }
 
-/// Computes the offset of the data field within ArcInner.
+/// Computes the offset of the data field within `RcBox`.
 ///
 /// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
 fn data_offset_sized<T>() -> isize {
-    let align = align_of::<T>();
+    data_offset_align(align_of::<T>())
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> isize {
     let layout = Layout::new::<RcBox<()>>();
     (layout.size() + layout.padding_needed_for(align)) as isize
 }
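
The `Guard` used by `from_iter_exact` above keeps its `Drop` impl out of the diff context. As a rough, standalone sketch of the same panic-guard pattern (hypothetical code, not the std internals; `write_exact` is an illustrative name, and it assumes `len > 0` and a non-zero-sized `T` so the raw allocation is valid):

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use std::{mem, ptr};

/// Builds a heap buffer from an iterator yielding exactly `len` items.
/// If producing an item panics, the guard drops what was already written
/// and frees the buffer instead of leaking it.
unsafe fn write_exact<T>(iter: impl Iterator<Item = T>, len: usize) -> *mut T {
    struct Guard<T> {
        mem: *mut u8,
        elems: *mut T,
        layout: Layout,
        n_elems: usize,
    }

    impl<T> Drop for Guard<T> {
        fn drop(&mut self) {
            unsafe {
                // Drop only the elements initialized so far...
                let slice = ptr::slice_from_raw_parts_mut(self.elems, self.n_elems);
                ptr::drop_in_place(slice);
                // ...then free the allocation itself.
                dealloc(self.mem, self.layout);
            }
        }
    }

    let layout = Layout::array::<T>(len).unwrap();
    let mem = alloc(layout);
    if mem.is_null() {
        handle_alloc_error(layout);
    }
    let elems = mem as *mut T;

    let mut guard = Guard { mem, elems, layout, n_elems: 0 };
    for (i, item) in iter.enumerate() {
        ptr::write(elems.add(i), item);
        guard.n_elems += 1;
    }

    // Every element was written: disarm the guard so the buffer survives.
    mem::forget(guard);
    elems
}

fn main() {
    // Hand the buffer to `Box` so it is dropped and freed normally.
    let boxed: Box<[String]> = unsafe {
        let len = 3;
        let elems = write_exact((0..len).map(|i| i.to_string()), len);
        Box::from_raw(ptr::slice_from_raw_parts_mut(elems, len))
    };
    assert_eq!(&*boxed, ["0", "1", "2"]);
}
```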
diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs
index 126169b5c82..7cb826ee024 100644
--- a/src/liballoc/sync.rs
+++ b/src/liballoc/sync.rs
@@ -12,6 +12,7 @@ use core::sync::atomic::Ordering::{Acquire, Relaxed, Release, SeqCst};
 use core::borrow;
 use core::fmt;
 use core::cmp::{self, Ordering};
+use core::iter;
 use core::intrinsics::abort;
 use core::mem::{self, align_of, align_of_val, size_of_val};
 use core::ops::{Deref, Receiver, CoerceUnsized, DispatchFromDyn};
@@ -21,7 +22,7 @@ use core::marker::{Unpin, Unsize, PhantomData};
 use core::hash::{Hash, Hasher};
 use core::{isize, usize};
 use core::convert::From;
-use core::slice::from_raw_parts_mut;
+use core::slice::{self, from_raw_parts_mut};
 
 use crate::alloc::{Global, Alloc, Layout, box_free, handle_alloc_error};
 use crate::boxed::Box;
@@ -206,6 +207,19 @@ impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
 #[unstable(feature = "dispatch_from_dyn", issue = "0")]
 impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
 
+impl<T: ?Sized> Arc<T> {
+    fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
+        Self {
+            ptr,
+            phantom: PhantomData,
+        }
+    }
+
+    unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
+        Self::from_inner(NonNull::new_unchecked(ptr))
+    }
+}
+
 /// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
 /// managed value. The value is accessed by calling [`upgrade`] on the `Weak`
 /// pointer, which returns an [`Option`]`<`[`Arc`]`<T>>`.
@@ -290,7 +304,7 @@ impl<T> Arc<T> {
             weak: atomic::AtomicUsize::new(1),
             data,
         };
-        Arc { ptr: Box::into_raw_non_null(x), phantom: PhantomData }
+        Self::from_inner(Box::into_raw_non_null(x))
     }
 
     /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
@@ -403,10 +417,7 @@ impl<T: ?Sized> Arc<T> {
         let fake_ptr = ptr as *mut ArcInner<T>;
         let arc_ptr = set_data_ptr(fake_ptr, (ptr as *mut u8).offset(-offset));
 
-        Arc {
-            ptr: NonNull::new_unchecked(arc_ptr),
-            phantom: PhantomData,
-        }
+        Self::from_ptr(arc_ptr)
     }
 
     /// Consumes the `Arc`, returning the wrapped pointer as `NonNull<T>`.
@@ -577,21 +588,28 @@ impl<T: ?Sized> Arc<T> {
 }
 
 impl<T: ?Sized> Arc<T> {
-    // Allocates an `ArcInner<T>` with sufficient space for an unsized value
-    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
-        // Calculate layout using the given value.
+    /// Allocates an `ArcInner<T>` with sufficient space for
+    /// an unsized value where the value has the layout provided.
+    ///
+    /// The function `mem_to_arcinner` is called with the data pointer
+    /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
+    unsafe fn allocate_for_unsized(
+        value_layout: Layout,
+        mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>
+    ) -> *mut ArcInner<T> {
+        // Calculate layout using the given value layout.
         // Previously, layout was calculated on the expression
         // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
         // reference (see #54908).
         let layout = Layout::new::<ArcInner<()>>()
-            .extend(Layout::for_value(&*ptr)).unwrap().0
+            .extend(value_layout).unwrap().0
             .pad_to_align().unwrap();
 
         let mem = Global.alloc(layout)
             .unwrap_or_else(|_| handle_alloc_error(layout));
 
         // Initialize the ArcInner
-        let inner = set_data_ptr(ptr as *mut T, mem.as_ptr() as *mut u8) as *mut ArcInner<T>;
+        let inner = mem_to_arcinner(mem.as_ptr());
         debug_assert_eq!(Layout::for_value(&*inner), layout);
 
         ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
@@ -600,6 +618,15 @@ impl<T: ?Sized> Arc<T> {
         inner
     }
 
+    /// Allocates an `ArcInner<T>` with sufficient space for an unsized value.
+    unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
+        // Allocate for the `ArcInner<T>` using the given value.
+        Self::allocate_for_unsized(
+            Layout::for_value(&*ptr),
+            |mem| set_data_ptr(ptr as *mut T, mem) as *mut ArcInner<T>,
+        )
+    }
+
     fn from_box(v: Box<T>) -> Arc<T> {
         unsafe {
             let box_unique = Box::into_unique(v);
@@ -617,45 +644,49 @@ impl<T: ?Sized> Arc<T> {
             // Free the allocation without dropping its contents
             box_free(box_unique);
 
-            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+            Self::from_ptr(ptr)
         }
     }
 }
 
-// Sets the data pointer of a `?Sized` raw pointer.
-//
-// For a slice/trait object, this sets the `data` field and leaves the rest
-// unchanged. For a sized raw pointer, this simply sets the pointer.
+impl<T> Arc<[T]> {
+    /// Allocates an `ArcInner<[T]>` with the given length.
+    unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
+        Self::allocate_for_unsized(
+            Layout::array::<T>(len).unwrap(),
+            |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
+        )
+    }
+}
+
+/// Sets the data pointer of a `?Sized` raw pointer.
+///
+/// For a slice/trait object, this sets the `data` field and leaves the rest
+/// unchanged. For a sized raw pointer, this simply sets the pointer.
 unsafe fn set_data_ptr<T: ?Sized, U>(mut ptr: *mut T, data: *mut U) -> *mut T {
     ptr::write(&mut ptr as *mut _ as *mut *mut u8, data as *mut u8);
     ptr
 }
 
 impl<T> Arc<[T]> {
-    // Copy elements from slice into newly allocated Arc<[T]>
-    //
-    // Unsafe because the caller must either take ownership or bind `T: Copy`
+    /// Copies elements from the slice into a newly allocated `Arc<[T]>`.
+    ///
+    /// Unsafe because the caller must either take ownership or bind `T: Copy`.
     unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
-        let v_ptr = v as *const [T];
-        let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(v.len());
 
         ptr::copy_nonoverlapping(
             v.as_ptr(),
             &mut (*ptr).data as *mut [T] as *mut T,
             v.len());
 
-        Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+        Self::from_ptr(ptr)
     }
-}
 
-// Specialization trait used for From<&[T]>
-trait ArcFromSlice<T> {
-    fn from_slice(slice: &[T]) -> Self;
-}
-
-impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
-    #[inline]
-    default fn from_slice(v: &[T]) -> Self {
+    /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
+    ///
+    /// Behavior is undefined should the size be wrong.
+    unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
         // Panic guard while cloning T elements.
         // In the event of a panic, elements that have been written
         // into the new ArcInner will be dropped, then the memory freed.
@@ -677,32 +708,43 @@ impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
             }
         }
 
-        unsafe {
-            let v_ptr = v as *const [T];
-            let ptr = Self::allocate_for_ptr(v_ptr);
+        let ptr = Self::allocate_for_slice(len);
+
+        let mem = ptr as *mut _ as *mut u8;
+        let layout = Layout::for_value(&*ptr);
 
-            let mem = ptr as *mut _ as *mut u8;
-            let layout = Layout::for_value(&*ptr);
+        // Pointer to first element
+        let elems = &mut (*ptr).data as *mut [T] as *mut T;
 
-            // Pointer to first element
-            let elems = &mut (*ptr).data as *mut [T] as *mut T;
+        let mut guard = Guard {
+            mem: NonNull::new_unchecked(mem),
+            elems,
+            layout,
+            n_elems: 0,
+        };
 
-            let mut guard = Guard{
-                mem: NonNull::new_unchecked(mem),
-                elems: elems,
-                layout: layout,
-                n_elems: 0,
-            };
+        for (i, item) in iter.enumerate() {
+            ptr::write(elems.add(i), item);
+            guard.n_elems += 1;
+        }
 
-            for (i, item) in v.iter().enumerate() {
-                ptr::write(elems.add(i), item.clone());
-                guard.n_elems += 1;
-            }
+        // All clear. Forget the guard so it doesn't free the new ArcInner.
+        mem::forget(guard);
+
+        Self::from_ptr(ptr)
+    }
+}
 
-            // All clear. Forget the guard so it doesn't free the new ArcInner.
-            mem::forget(guard);
+/// Specialization trait used for `From<&[T]>`.
+trait ArcFromSlice<T> {
+    fn from_slice(slice: &[T]) -> Self;
+}
 
-            Arc { ptr: NonNull::new_unchecked(ptr), phantom: PhantomData }
+impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
+    #[inline]
+    default fn from_slice(v: &[T]) -> Self {
+        unsafe {
+            Self::from_iter_exact(v.iter().cloned(), v.len())
         }
     }
 }
@@ -760,7 +802,7 @@ impl<T: ?Sized> Clone for Arc<T> {
             }
         }
 
-        Arc { ptr: self.ptr, phantom: PhantomData }
+        Self::from_inner(self.ptr)
     }
 }
 
@@ -1039,7 +1081,7 @@ impl Arc<dyn Any + Send + Sync> {
         if (*self).is::<T>() {
             let ptr = self.ptr.cast::<ArcInner<T>>();
             mem::forget(self);
-            Ok(Arc { ptr, phantom: PhantomData })
+            Ok(Arc::from_inner(ptr))
         } else {
             Err(self)
         }
@@ -1260,11 +1302,7 @@ impl<T: ?Sized> Weak<T> {
 
             // Relaxed is valid for the same reason it is on Arc's Clone impl
             match inner.strong.compare_exchange_weak(n, n + 1, Relaxed, Relaxed) {
-                Ok(_) => return Some(Arc {
-                    // null checked above
-                    ptr: self.ptr,
-                    phantom: PhantomData,
-                }),
+                Ok(_) => return Some(Arc::from_inner(self.ptr)), // null checked above
                 Err(old) => n = old,
             }
         }
@@ -1785,6 +1823,98 @@ impl<T> From<Vec<T>> for Arc<[T]> {
     }
 }
 
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Arc<[T]> {
+    /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
+    ///
+    /// # Performance characteristics
+    ///
+    /// ## The general case
+    ///
+    /// In the general case, collecting into `Arc<[T]>` is done by first
+    /// collecting into a `Vec<T>`. That is, when writing the following:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// this behaves as if we wrote:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+    ///     .collect::<Vec<_>>() // The first set of allocations happens here.
+    ///     .into(); // A second allocation for `Arc<[T]>` happens here.
+    /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+    /// ```
+    ///
+    /// This will allocate as many times as needed for constructing the `Vec<T>`
+    /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
+    ///
+    /// ## Iterators of known length
+    ///
+    /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+    /// a single allocation will be made for the `Arc<[T]>`. For example:
+    ///
+    /// ```rust
+    /// # use std::sync::Arc;
+    /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+    /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+    /// ```
+    fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+        ArcFromIter::from_iter(iter.into_iter())
+    }
+}
+
+/// Specialization trait used for collecting into `Arc<[T]>`.
+trait ArcFromIter<T, I> {
+    fn from_iter(iter: I) -> Self;
+}
+
+impl<T, I: Iterator<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
+    default fn from_iter(iter: I) -> Self {
+        iter.collect::<Vec<T>>().into()
+    }
+}
+
+impl<T, I: iter::TrustedLen<Item = T>> ArcFromIter<T, I> for Arc<[T]> {
+    default fn from_iter(iter: I) -> Self {
+        // This is the case for a `TrustedLen` iterator.
+        let (low, high) = iter.size_hint();
+        if let Some(high) = high {
+            debug_assert_eq!(
+                low, high,
+                "TrustedLen iterator's size hint is not exact: {:?}",
+                (low, high)
+            );
+
+            unsafe {
+                // SAFETY: We need an exact element count, and `TrustedLen` guarantees that `low` is exact.
+                Arc::from_iter_exact(iter, low)
+            }
+        } else {
+            // Fall back to normal implementation.
+            iter.collect::<Vec<T>>().into()
+        }
+    }
+}
+
+impl<'a, T: 'a + Clone> ArcFromIter<&'a T, slice::Iter<'a, T>> for Arc<[T]> {
+    fn from_iter(iter: slice::Iter<'a, T>) -> Self {
+        // Delegate to `impl<T: Clone> From<&[T]> for Arc<[T]>`.
+        //
+        // In the case that `T: Copy`, we get to use `ptr::copy_nonoverlapping`
+        // which is even more performant.
+        //
+        // In the fall-back case we have `T: Clone`. This is still better
+        // than the `TrustedLen` implementation as slices have a known length
+        // and so we get to avoid calling `size_hint` and avoid the branching.
+        iter.as_slice().into()
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use std::boxed::Box;
@@ -2285,20 +2415,22 @@ impl<T: ?Sized> AsRef<T> for Arc<T> {
 #[stable(feature = "pin", since = "1.33.0")]
 impl<T: ?Sized> Unpin for Arc<T> { }
 
-/// Computes the offset of the data field within ArcInner.
+/// Computes the offset of the data field within `ArcInner`.
 unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> isize {
-    // Align the unsized value to the end of the ArcInner.
-    // Because it is ?Sized, it will always be the last field in memory.
-    let align = align_of_val(&*ptr);
-    let layout = Layout::new::<ArcInner<()>>();
-    (layout.size() + layout.padding_needed_for(align)) as isize
+    // Align the unsized value to the end of the `ArcInner`.
+    // Because it is `?Sized`, it will always be the last field in memory.
+    data_offset_align(align_of_val(&*ptr))
 }
 
-/// Computes the offset of the data field within ArcInner.
+/// Computes the offset of the data field within `ArcInner`.
 ///
 /// Unlike [`data_offset`], this doesn't need the pointer, but it works only on `T: Sized`.
 fn data_offset_sized<T>() -> isize {
-    let align = align_of::<T>();
+    data_offset_align(align_of::<T>())
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> isize {
     let layout = Layout::new::<ArcInner<()>>();
     (layout.size() + layout.padding_needed_for(align)) as isize
 }
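
Both `rc.rs` and `sync.rs` now funnel their `data_offset*` functions through a single `data_offset_align`. Below is a small standalone sketch of the arithmetic involved (a hypothetical `Header` stands in for `ArcInner<()>`, and the rounding is done by hand since `Layout::padding_needed_for` is an unstable API):

```rust
use std::alloc::Layout;
use std::mem::align_of;

// Stand-in for `ArcInner<()>`: the two reference counts that sit
// in front of the data field in memory.
#[allow(dead_code)]
struct Header {
    strong: usize,
    weak: usize,
}

/// Byte offset of a value with alignment `align` placed directly after
/// `Header`: the header size rounded up to a multiple of `align`.
fn data_offset_align(align: usize) -> isize {
    let size = Layout::new::<Header>().size();
    // `align` is always a power of two, so masking rounds `size` up.
    ((size + align - 1) & !(align - 1)) as isize
}

fn main() {
    // On a 64-bit target the header is 16 bytes, so a `u8` payload
    // starts at offset 16, and so does a 16-byte-aligned `u128`.
    println!("u8:   {}", data_offset_align(align_of::<u8>()));
    println!("u128: {}", data_offset_align(align_of::<u128>()));
}
```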
diff --git a/src/liballoc/tests/arc.rs b/src/liballoc/tests/arc.rs
index 2759b1b1cac..cf2ad2a8e60 100644
--- a/src/liballoc/tests/arc.rs
+++ b/src/liballoc/tests/arc.rs
@@ -2,6 +2,8 @@ use std::any::Any;
 use std::sync::{Arc, Weak};
 use std::cell::RefCell;
 use std::cmp::PartialEq;
+use std::iter::TrustedLen;
+use std::mem;
 
 #[test]
 fn uninhabited() {
@@ -85,3 +87,122 @@ fn eq() {
     assert!(!(x != x));
     assert_eq!(*x.0.borrow(), 0);
 }
+
+// The test code below is identical to that in `rc.rs`.
+// For better maintainability we therefore define this type alias.
+type Rc<T> = Arc<T>;
+
+const SHARED_ITER_MAX: u16 = 100;
+
+fn assert_trusted_len<I: TrustedLen>(_: &I) {}
+
+#[test]
+fn shared_from_iter_normal() {
+    // Exercise the base implementation for non-`TrustedLen` iterators.
+    {
+        // `Filter` is never `TrustedLen` since we don't
+        // know statically how many elements will be kept:
+        let iter = (0..SHARED_ITER_MAX).filter(|x| x % 2 == 0).map(Box::new);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    } // Drop what hasn't been dropped yet.
+}
+
+#[test]
+fn shared_from_iter_trustedlen_normal() {
+    // Exercise the `TrustedLen` implementation under normal circumstances
+    // where `size_hint()` matches `(_, Some(exact_len))`.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(Box::new);
+        assert_trusted_len(&iter);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    } // Drop what hasn't been dropped yet.
+
+    // Try a ZST to make sure it is handled well.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(|_| ());
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(0, mem::size_of_val(&*rc));
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    }
+}
+
+#[test]
+#[should_panic = "I've almost got 99 problems."]
+fn shared_from_iter_trustedlen_panic() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where `.next()` panics before the last iteration.
+    let iter = (0..SHARED_ITER_MAX)
+        .map(|val| {
+            match val {
+                98 => panic!("I've almost got 99 problems."),
+                _ => Box::new(val),
+            }
+        });
+    assert_trusted_len(&iter);
+    let _ = iter.collect::<Rc<[_]>>();
+
+    panic!("I am unreachable.");
+}
+
+#[test]
+fn shared_from_iter_trustedlen_no_fuse() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where the iterator does not behave in a fused manner.
+    struct Iter(std::vec::IntoIter<Option<Box<u8>>>);
+
+    unsafe impl TrustedLen for Iter {}
+
+    impl Iterator for Iter {
+        fn size_hint(&self) -> (usize, Option<usize>) {
+            (2, Some(2))
+        }
+
+        type Item = Box<u8>;
+
+        fn next(&mut self) -> Option<Self::Item> {
+            self.0.next().flatten()
+        }
+    }
+
+    let vec = vec![
+        Some(Box::new(42)),
+        Some(Box::new(24)),
+        None,
+        Some(Box::new(12)),
+    ];
+    let iter = Iter(vec.into_iter());
+    assert_trusted_len(&iter);
+    assert_eq!(
+        &[Box::new(42), Box::new(24)],
+        &*iter.collect::<Rc<[_]>>()
+    );
+}
diff --git a/src/liballoc/tests/lib.rs b/src/liballoc/tests/lib.rs
index ddb3120e89d..5a43c8e09a2 100644
--- a/src/liballoc/tests/lib.rs
+++ b/src/liballoc/tests/lib.rs
@@ -2,8 +2,10 @@
 #![feature(box_syntax)]
 #![feature(drain_filter)]
 #![feature(exact_size_is_empty)]
+#![feature(option_flattening)]
 #![feature(pattern)]
 #![feature(repeat_generic_slice)]
+#![feature(trusted_len)]
 #![feature(try_reserve)]
 #![feature(unboxed_closures)]
 #![deny(rust_2018_idioms)]
diff --git a/src/liballoc/tests/rc.rs b/src/liballoc/tests/rc.rs
index 18f82e80410..7854ca0fc16 100644
--- a/src/liballoc/tests/rc.rs
+++ b/src/liballoc/tests/rc.rs
@@ -2,6 +2,8 @@ use std::any::Any;
 use std::rc::{Rc, Weak};
 use std::cell::RefCell;
 use std::cmp::PartialEq;
+use std::mem;
+use std::iter::TrustedLen;
 
 #[test]
 fn uninhabited() {
@@ -85,3 +87,118 @@ fn eq() {
     assert!(!(x != x));
     assert_eq!(*x.0.borrow(), 0);
 }
+
+const SHARED_ITER_MAX: u16 = 100;
+
+fn assert_trusted_len<I: TrustedLen>(_: &I) {}
+
+#[test]
+fn shared_from_iter_normal() {
+    // Exercise the base implementation for non-`TrustedLen` iterators.
+    {
+        // `Filter` is never `TrustedLen` since we don't
+        // know statically how many elements will be kept:
+        let iter = (0..SHARED_ITER_MAX).filter(|x| x % 2 == 0).map(Box::new);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    } // Drop what hasn't been dropped yet.
+}
+
+#[test]
+fn shared_from_iter_trustedlen_normal() {
+    // Exercise the `TrustedLen` implementation under normal circumstances
+    // where `size_hint()` matches `(_, Some(exact_len))`.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(Box::new);
+        assert_trusted_len(&iter);
+
+        // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+
+        // Clone a bit and let these get dropped.
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    } // Drop what hasn't been dropped yet.
+
+    // Try a ZST to make sure it is handled well.
+    {
+        let iter = (0..SHARED_ITER_MAX).map(|_| ());
+        let vec = iter.clone().collect::<Vec<_>>();
+        let rc = iter.collect::<Rc<[_]>>();
+        assert_eq!(&*vec, &*rc);
+        assert_eq!(0, mem::size_of_val(&*rc));
+        {
+            let _rc_2 = rc.clone();
+            let _rc_3 = rc.clone();
+            let _rc_4 = Rc::downgrade(&_rc_3);
+        }
+    }
+}
+
+#[test]
+#[should_panic = "I've almost got 99 problems."]
+fn shared_from_iter_trustedlen_panic() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where `.next()` panics before the last iteration.
+    let iter = (0..SHARED_ITER_MAX)
+        .map(|val| {
+            match val {
+                98 => panic!("I've almost got 99 problems."),
+                _ => Box::new(val),
+            }
+        });
+    assert_trusted_len(&iter);
+    let _ = iter.collect::<Rc<[_]>>();
+
+    panic!("I am unreachable.");
+}
+
+#[test]
+fn shared_from_iter_trustedlen_no_fuse() {
+    // Exercise the `TrustedLen` implementation when `size_hint()` matches
+    // `(_, Some(exact_len))` but where the iterator does not behave in a fused manner.
+    struct Iter(std::vec::IntoIter<Option<Box<u8>>>);
+
+    unsafe impl TrustedLen for Iter {}
+
+    impl Iterator for Iter {
+        fn size_hint(&self) -> (usize, Option<usize>) {
+            (2, Some(2))
+        }
+
+        type Item = Box<u8>;
+
+        fn next(&mut self) -> Option<Self::Item> {
+            self.0.next().flatten()
+        }
+    }
+
+    let vec = vec![
+        Some(Box::new(42)),
+        Some(Box::new(24)),
+        None,
+        Some(Box::new(12)),
+    ];
+    let iter = Iter(vec.into_iter());
+    assert_trusted_len(&iter);
+    assert_eq!(
+        &[Box::new(42), Box::new(24)],
+        &*iter.collect::<Rc<[_]>>()
+    );
+}