author    Albin Hedman <albin9604@gmail.com>  2021-03-13 20:33:27 +0100
committer Albin Hedman <albin9604@gmail.com>  2021-03-15 20:45:22 +0100
commit    64e2248794caada12469a0bd31d883eb6370b095 (patch)
tree      ca49348dcc25a72a9d7099859127afd809900576
parent    62cf2445633c03a927b7fef4abe82ea480df4078 (diff)
Constify mem::swap and ptr::swap[_nonoverlapping]
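
This makes `mem::swap`, `ptr::swap` and `ptr::swap_nonoverlapping` (plus the private helpers they share) callable in const contexts behind the new `const_swap` gate (tracking issue #83163). A minimal nightly sketch of what the change enables; `const_mut_refs` is an assumption here, since taking `&mut` in const fns was still gated separately at the time:

#![feature(const_swap)]
#![feature(const_mut_refs)] // assumed: `&mut` in const fns was separately unstable

use core::mem;

// Hypothetical const helper: order a pair at compile time.
const fn sorted(mut a: u32, mut b: u32) -> (u32, u32) {
    if a > b {
        mem::swap(&mut a, &mut b); // a `const fn` under the gate above
    }
    (a, b)
}

const PAIR: (u32, u32) = sorted(3, 1);

fn main() {
    assert_eq!(PAIR, (1, 3));
}
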
-rw-r--r--  library/core/src/intrinsics.rs | 12
-rw-r--r--  library/core/src/lib.rs        |  1
-rw-r--r--  library/core/src/mem/mod.rs    |  3
-rw-r--r--  library/core/src/ptr/mod.rs    | 23
4 files changed, 12 insertions(+), 27 deletions(-)
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 634ed87b091..4c2472ed82c 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -1902,18 +1902,6 @@ pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
     !ptr.is_null() && ptr as usize % mem::align_of::<T>() == 0
 }
 
-/// Checks whether the regions of memory starting at `src` and `dst` of size
-/// `count * size_of::<T>()` do *not* overlap.
-pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
-    let src_usize = src as usize;
-    let dst_usize = dst as usize;
-    let size = mem::size_of::<T>().checked_mul(count).unwrap();
-    let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize };
-    // If the absolute distance between the ptrs is at least as big as the size of the buffer,
-    // they do not overlap.
-    diff >= size
-}
-
 /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
 /// `val`.
 ///
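
Context for the removal above: `is_nonoverlapping` decides overlap by casting both pointers to `usize`, and pointer-to-integer casts cannot be evaluated at compile time. Presumably that is why the commit deletes the helper together with its only caller, the debug-only check in `swap_nonoverlapping` (see the ptr/mod.rs hunk below), rather than constifying it. A runtime-only sketch of the deleted logic, with the const-incompatible step marked:

use core::mem;

// Runtime-only sketch of the deleted helper; it cannot be a `const fn`
// because of the pointer-to-integer casts below.
fn is_nonoverlapping_sketch<T>(src: *const T, dst: *const T, count: usize) -> bool {
    let size = mem::size_of::<T>().checked_mul(count).unwrap();
    let (a, b) = (src as usize, dst as usize); // <- rejected in const eval
    let diff = if a > b { a - b } else { b - a };
    // The regions are disjoint iff their start addresses are at least
    // `size` bytes apart.
    diff >= size
}
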
diff --git a/library/core/src/lib.rs b/library/core/src/lib.rs
index e10e1738de5..8e35adcbd9e 100644
--- a/library/core/src/lib.rs
+++ b/library/core/src/lib.rs
@@ -98,6 +98,7 @@
 #![feature(const_slice_from_raw_parts)]
 #![feature(const_slice_ptr_len)]
 #![feature(const_size_of_val)]
+#![feature(const_swap)]
 #![feature(const_align_of_val)]
 #![feature(const_type_id)]
 #![feature(const_type_name)]
diff --git a/library/core/src/mem/mod.rs b/library/core/src/mem/mod.rs
index 84edbd30a5d..c01730b6551 100644
--- a/library/core/src/mem/mod.rs
+++ b/library/core/src/mem/mod.rs
@@ -682,7 +682,8 @@ pub unsafe fn uninitialized<T>() -> T {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub fn swap<T>(x: &mut T, y: &mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const fn swap<T>(x: &mut T, y: &mut T) {
     // SAFETY: the raw pointers have been created from safe mutable references satisfying all the
     // constraints on `ptr::swap_nonoverlapping_one`
     unsafe {
diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs
index 5ac260fc883..155a64345e9 100644
--- a/library/core/src/ptr/mod.rs
+++ b/library/core/src/ptr/mod.rs
@@ -67,7 +67,7 @@
 use crate::cmp::Ordering;
 use crate::fmt;
 use crate::hash;
-use crate::intrinsics::{self, abort, is_aligned_and_not_null, is_nonoverlapping};
+use crate::intrinsics::{self, abort, is_aligned_and_not_null};
 use crate::mem::{self, MaybeUninit};
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -394,7 +394,8 @@ pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
 /// ```
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
-pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
     // Give ourselves some scratch space to work with.
     // We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
     let mut tmp = MaybeUninit::<T>::uninit();
@@ -451,16 +452,8 @@ pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
 /// ```
 #[inline]
 #[stable(feature = "swap_nonoverlapping", since = "1.27.0")]
-pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
-    if cfg!(debug_assertions)
-        && !(is_aligned_and_not_null(x)
-            && is_aligned_and_not_null(y)
-            && is_nonoverlapping(x, y, count))
-    {
-        // Not panicking to keep codegen impact smaller.
-        abort();
-    }
-
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
     let x = x as *mut u8;
     let y = y as *mut u8;
     let len = mem::size_of::<T>() * count;
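
With the debug-only overlap check gone (see the note after the intrinsics.rs hunk above), the remaining body is const-compatible. A hedged sketch of compile-time use on nightly, again assuming `const_mut_refs` alongside the gate this commit adds:

#![feature(const_swap)]
#![feature(const_mut_refs)] // assumed, as above

use core::ptr;

const fn swapped(mut a: u8, mut b: u8) -> (u8, u8) {
    // SAFETY: `a` and `b` are distinct locals, so the two one-element
    // regions are valid, aligned and non-overlapping.
    unsafe { ptr::swap_nonoverlapping(&mut a, &mut b, 1) };
    (a, b)
}

const SWAPPED: (u8, u8) = swapped(1, 2);

fn main() {
    assert_eq!(SWAPPED, (2, 1));
}
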
@@ -470,7 +463,8 @@ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
 }
 
 #[inline]
-pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+pub(crate) const unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
     // For types smaller than the block optimization below,
     // just swap directly to avoid pessimizing codegen.
     if mem::size_of::<T>() < 32 {
@@ -488,7 +482,8 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
 }
 
 #[inline]
-unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
+#[rustc_const_unstable(feature = "const_swap", issue = "83163")]
+const unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
     // The approach here is to utilize simd to swap x & y efficiently. Testing reveals
     // that swapping either 32 bytes or 64 bytes at a time is most efficient for Intel
     // Haswell E processors. LLVM is more able to optimize if we give a struct a