author     Michael Bradshaw <mjbshaw@google.com>  2018-09-29 19:51:09 -0700
committer  Michael Bradshaw <mjbshaw@google.com>  2018-09-29 19:51:09 -0700
commit     43cc32fbb2506eff0090e894c1e8a46b62a8eb0b (patch)
tree       ac301fe1be819c25d833880da0ab32bf550016eb /src/libcore/slice
parent     aec5330082a0c4664abf0f6604c1b05768a90234 (diff)
parent     9653f790333d1270f36f1614e85d8a7b54193e75 (diff)
download   rust-43cc32fbb2506eff0090e894c1e8a46b62a8eb0b.tar.gz
           rust-43cc32fbb2506eff0090e894c1e8a46b62a8eb0b.zip
Merge branch 'master' into drop
Diffstat (limited to 'src/libcore/slice')
-rw-r--r--  src/libcore/slice/rotate.rs | 12
-rw-r--r--  src/libcore/slice/sort.rs   | 14
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs
index 07153735300..0d182b84974 100644
--- a/src/libcore/slice/rotate.rs
+++ b/src/libcore/slice/rotate.rs
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 use cmp;
-use mem::{self, MaybeUninit};
+use mem;
 use ptr;
 
 /// Rotation is much faster if it has access to a little bit of memory. This
@@ -26,6 +26,12 @@ union RawArray<T> {
 }
 
 impl<T> RawArray<T> {
+    fn new() -> Self {
+        unsafe { mem::uninitialized() }
+    }
+    fn ptr(&self) -> *mut T {
+        unsafe { &self.typed as *const T as *mut T }
+    }
     fn cap() -> usize {
         if mem::size_of::<T>() == 0 {
             usize::max_value()
@@ -82,8 +88,8 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mid: *mut T, mut right: usize) {
         }
     }
 
-    let mut rawarray = MaybeUninit::<RawArray<T>>::uninitialized();
-    let buf = &mut (*rawarray.as_mut_ptr()).typed as *mut [T; 2] as *mut T;
+    let rawarray = RawArray::new();
+    let buf = rawarray.ptr();
 
     let dim = mid.sub(left).add(right);
     if left <= right {
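
The rotate.rs hunks above drop the MaybeUninit wrapper around the RawArray scratch space and fill it with mem::uninitialized() instead. As a minimal, self-contained sketch of the same scratch-buffer idea (not part of this commit), the snippet below is written against the MaybeUninit API as it later stabilized (MaybeUninit::uninit and as_mut_ptr); swap_via_scratch is an illustrative helper, not a libcore function: reserve uninitialized stack space, take a raw pointer into it, and read back only what was explicitly written.

use std::mem::MaybeUninit;
use std::ptr;

/// Swap two values through an uninitialized stack scratch slot, mirroring the
/// RawArray idea: nothing in `scratch` is read before it has been written.
fn swap_via_scratch<T>(a: &mut T, b: &mut T) {
    let mut scratch = MaybeUninit::<T>::uninit();
    unsafe {
        let buf = scratch.as_mut_ptr();
        ptr::copy_nonoverlapping(a as *const T, buf, 1);         // scratch <- *a
        ptr::copy_nonoverlapping(b as *const T, a as *mut T, 1); // *a <- *b
        ptr::copy_nonoverlapping(buf, b as *mut T, 1);           // *b <- old *a
    }
}

fn main() {
    let (mut x, mut y) = (String::from("left"), String::from("right"));
    swap_via_scratch(&mut x, &mut y);
    assert_eq!((x.as_str(), y.as_str()), ("right", "left"));
}
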
diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs
index affe84fbef9..e4c1fd03f9e 100644
--- a/src/libcore/slice/sort.rs
+++ b/src/libcore/slice/sort.rs
@@ -17,7 +17,7 @@
 //! stable sorting implementation.
 
 use cmp;
-use mem::{self, MaybeUninit};
+use mem;
 use ptr;
 
 /// When dropped, copies from `src` into `dest`.
@@ -226,14 +226,14 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
     let mut block_l = BLOCK;
     let mut start_l = ptr::null_mut();
     let mut end_l = ptr::null_mut();
-    let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninitialized();
+    let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
 
     // The current block on the right side (from `r.sub(block_r)` to `r`).
     let mut r = unsafe { l.add(v.len()) };
     let mut block_r = BLOCK;
     let mut start_r = ptr::null_mut();
     let mut end_r = ptr::null_mut();
-    let mut offsets_r = MaybeUninit::<[u8; BLOCK]>::uninitialized();
+    let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() };
 
     // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
     // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
@@ -272,8 +272,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
 
         if start_l == end_l {
             // Trace `block_l` elements from the left side.
-            start_l = offsets_l.as_mut_ptr() as *mut u8;
-            end_l = offsets_l.as_mut_ptr() as *mut u8;
+            start_l = offsets_l.as_mut_ptr();
+            end_l = offsets_l.as_mut_ptr();
             let mut elem = l;
 
             for i in 0..block_l {
@@ -288,8 +288,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
 
         if start_r == end_r {
             // Trace `block_r` elements from the right side.
-            start_r = offsets_r.as_mut_ptr() as *mut u8;
-            end_r = offsets_r.as_mut_ptr() as *mut u8;
+            start_r = offsets_r.as_mut_ptr();
+            end_r = offsets_r.as_mut_ptr();
             let mut elem = r;
 
             for i in 0..block_r {
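
In the sort.rs hunks, offsets_l and offsets_r are per-block scratch arrays: the partition loop writes the offsets of out-of-place elements into a prefix of each array and later reads back only that prefix. Below is a minimal sketch (not from this commit) of that write-a-prefix / read-only-the-prefix pattern, again expressed with the later-stabilized MaybeUninit element type rather than mem::uninitialized(); the BLOCK value and the offsets_ge helper are illustrative names, not taken from libcore.

use std::mem::MaybeUninit;

const BLOCK: usize = 128; // illustrative block size, not libcore's constant

/// Collect the offsets of elements in `block` that are >= `pivot`.
/// Only the first `count` slots of `offsets` are ever written, and only those
/// slots are read back, so the rest of the array may stay uninitialized.
fn offsets_ge(block: &[u8; BLOCK], pivot: u8) -> Vec<usize> {
    let mut offsets: [MaybeUninit<u8>; BLOCK] = [MaybeUninit::uninit(); BLOCK];
    let mut count = 0;
    for (i, &x) in block.iter().enumerate() {
        // Branch-free style, as in partition_in_blocks: always store the
        // candidate offset, but only advance `count` when the predicate holds.
        offsets[count].write(i as u8);
        count += (x >= pivot) as usize;
    }
    offsets[..count]
        .iter()
        .map(|o| unsafe { o.assume_init() as usize })
        .collect()
}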