about summary refs log tree commit diff
path: root/src/libcore/slice
diff options
context:
space:
mode:
authorJorge Aparicio <jorge@japaric.io>2018-08-19 17:45:31 +0200
committerJorge Aparicio <jorge@japaric.io>2018-09-22 21:01:21 +0200
commit851acdd22dd2e99759e7f2f3e613ee9566ea0dcc (patch)
tree96508bcb735cdff5176cbd8c046670b9a42552e7 /src/libcore/slice
parent7bb5b3eb3228df648a08b02c85eddcd9b9cc85bd (diff)
downloadrust-851acdd22dd2e99759e7f2f3e613ee9566ea0dcc.tar.gz
rust-851acdd22dd2e99759e7f2f3e613ee9566ea0dcc.zip
core: fix deprecated warnings
Diffstat (limited to 'src/libcore/slice')
-rw-r--r--src/libcore/slice/rotate.rs9
-rw-r--r--src/libcore/slice/sort.rs14
2 files changed, 10 insertions, 13 deletions
diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs
index 0d182b84974..affe525ae46 100644
--- a/src/libcore/slice/rotate.rs
+++ b/src/libcore/slice/rotate.rs
@@ -9,7 +9,7 @@
 // except according to those terms.
 
 use cmp;
-use mem;
+use mem::{self, MaybeUninit};
 use ptr;
 
 /// Rotation is much faster if it has access to a little bit of memory. This
@@ -26,9 +26,6 @@ union RawArray<T> {
 }
 
 impl<T> RawArray<T> {
-    fn new() -> Self {
-        unsafe { mem::uninitialized() }
-    }
     fn ptr(&self) -> *mut T {
         unsafe { &self.typed as *const T as *mut T }
     }
@@ -88,8 +85,8 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mid: *mut T, mut right: usize) {
         }
     }
 
-    let rawarray = RawArray::new();
-    let buf = rawarray.ptr();
+    let rawarray = MaybeUninit::<RawArray<T>>::uninitialized();
+    let buf = rawarray.get_ref().ptr();
 
     let dim = mid.sub(left).add(right);
     if left <= right {
diff --git a/src/libcore/slice/sort.rs b/src/libcore/slice/sort.rs
index e4c1fd03f9e..cf0c5d876ab 100644
--- a/src/libcore/slice/sort.rs
+++ b/src/libcore/slice/sort.rs
@@ -17,7 +17,7 @@
 //! stable sorting implementation.
 
 use cmp;
-use mem;
+use mem::{self, MaybeUninit};
 use ptr;
 
 /// When dropped, copies from `src` into `dest`.
@@ -226,14 +226,14 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
     let mut block_l = BLOCK;
     let mut start_l = ptr::null_mut();
     let mut end_l = ptr::null_mut();
-    let mut offsets_l: [u8; BLOCK] = unsafe { mem::uninitialized() };
+    let mut offsets_l = MaybeUninit::<[u8; BLOCK]>::uninitialized();
 
     // The current block on the right side (from `r.sub(block_r)` to `r`).
     let mut r = unsafe { l.add(v.len()) };
     let mut block_r = BLOCK;
     let mut start_r = ptr::null_mut();
     let mut end_r = ptr::null_mut();
-    let mut offsets_r: [u8; BLOCK] = unsafe { mem::uninitialized() };
+    let mut offsets_r = MaybeUninit::<[u8; BLOCK]>::uninitialized();
 
     // FIXME: When we get VLAs, try creating one array of length `min(v.len(), 2 * BLOCK)` rather
     // than two fixed-size arrays of length `BLOCK`. VLAs might be more cache-efficient.
@@ -272,8 +272,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
 
         if start_l == end_l {
             // Trace `block_l` elements from the left side.
-            start_l = offsets_l.as_mut_ptr();
-            end_l = offsets_l.as_mut_ptr();
+            start_l = unsafe { offsets_l.get_mut().as_mut_ptr() };
+            end_l = unsafe { offsets_l.get_mut().as_mut_ptr() };
             let mut elem = l;
 
             for i in 0..block_l {
@@ -288,8 +288,8 @@ fn partition_in_blocks<T, F>(v: &mut [T], pivot: &T, is_less: &mut F) -> usize
 
         if start_r == end_r {
             // Trace `block_r` elements from the right side.
-            start_r = offsets_r.as_mut_ptr();
-            end_r = offsets_r.as_mut_ptr();
+            start_r = unsafe { offsets_r.get_mut().as_mut_ptr() };
+            end_r = unsafe { offsets_r.get_mut().as_mut_ptr() };
             let mut elem = r;
 
             for i in 0..block_r {