about summary refs log tree commit diff
path: root/src/libstd
diff options
context:
space:
mode:
author	bors <bors@rust-lang.org>	2014-05-20 23:31:30 -0700
committer	bors <bors@rust-lang.org>	2014-05-20 23:31:30 -0700
commit	4afc15e30c582d0ef994f12fc5eaa90bcaeab180 (patch)
tree	5a9b3f517121774014c01d03f3d0467ad0a0d43d /src/libstd
parent	feb9f302ca52b8105ad6641a3bf2646cd26d1434 (diff)
parent	19dc3b50bd63489988eb8fc83d25b08ca83df151 (diff)
download	rust-4afc15e30c582d0ef994f12fc5eaa90bcaeab180.tar.gz
	rust-4afc15e30c582d0ef994f12fc5eaa90bcaeab180.zip
auto merge of #14259 : alexcrichton/rust/core-mem, r=brson
Excluding the functions inherited from the cast module last week (with marked
stability levels), these functions received the following treatment.

* size_of - this method has become #[stable]
* nonzero_size_of/nonzero_size_of_val - these methods have been removed
* min_align_of - this method is now #[stable]
* pref_align_of - this method has been renamed without the
  `pref_` prefix, and it is the "default alignment" now. This decision is in line
  with what clang does (see url linked in comment on function). This function
  is now #[stable].
* init - renamed to zeroed and marked #[stable]
* uninit - marked #[stable]
* move_val_init - renamed to overwrite and marked #[stable]
* {from,to}_{be,le}{16,32,64} - all functions marked #[stable]
* swap/replace/drop - marked #[stable]
* size_of_val/min_align_of_val/align_of_val - these functions are marked
  #[unstable], but will continue to exist in some form. Concerns have been
  raised about their `_val` prefix.
Diffstat (limited to 'src/libstd')
-rw-r--r--	src/libstd/slice.rs	8
-rw-r--r--	src/libstd/unstable/mutex.rs	4
-rw-r--r--	src/libstd/vec.rs	16
3 files changed, 16 insertions(+), 12 deletions(-)
diff --git a/src/libstd/slice.rs b/src/libstd/slice.rs
index 66471ee3923..e78122f699d 100644
--- a/src/libstd/slice.rs
+++ b/src/libstd/slice.rs
@@ -306,8 +306,10 @@ impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
             // this should pass the real required alignment
             let ret = exchange_malloc(size, 8) as *mut RawVec<()>;
 
-            (*ret).fill = len * mem::nonzero_size_of::<T>();
-            (*ret).alloc = len * mem::nonzero_size_of::<T>();
+            let a_size = mem::size_of::<T>();
+            let a_size = if a_size == 0 {1} else {a_size};
+            (*ret).fill = len * a_size;
+            (*ret).alloc = len * a_size;
 
             // Be careful with the following loop. We want it to be optimized
             // to a memcpy (or something similarly fast) when T is Copy. LLVM
@@ -318,7 +320,7 @@ impl<'a, T: Clone> CloneableVector<T> for &'a [T] {
             try_finally(
                 &mut i, (),
                 |i, ()| while *i < len {
-                    mem::move_val_init(
+                    mem::overwrite(
                         &mut(*p.offset(*i as int)),
                         self.unsafe_ref(*i).clone());
                     *i += 1;
diff --git a/src/libstd/unstable/mutex.rs b/src/libstd/unstable/mutex.rs
index c9d70915694..760214eb8f8 100644
--- a/src/libstd/unstable/mutex.rs
+++ b/src/libstd/unstable/mutex.rs
@@ -390,8 +390,8 @@ mod imp {
     impl Mutex {
         pub unsafe fn new() -> Mutex {
             let m = Mutex {
-                lock: Unsafe::new(mem::init()),
-                cond: Unsafe::new(mem::init()),
+                lock: Unsafe::new(mem::zeroed()),
+                cond: Unsafe::new(mem::zeroed()),
             };
 
             pthread_mutex_init(m.lock.get(), 0 as *libc::c_void);
diff --git a/src/libstd/vec.rs b/src/libstd/vec.rs
index 57f8d78948f..32883707615 100644
--- a/src/libstd/vec.rs
+++ b/src/libstd/vec.rs
@@ -117,7 +117,7 @@ impl<T> Vec<T> {
         unsafe {
             let mut xs = Vec::with_capacity(length);
             while xs.len < length {
-                mem::move_val_init(xs.as_mut_slice().unsafe_mut_ref(xs.len),
+                mem::overwrite(xs.as_mut_slice().unsafe_mut_ref(xs.len),
                                    op(xs.len));
                 xs.len += 1;
             }
@@ -214,7 +214,7 @@ impl<T: Clone> Vec<T> {
         unsafe {
             let mut xs = Vec::with_capacity(length);
             while xs.len < length {
-                mem::move_val_init(xs.as_mut_slice().unsafe_mut_ref(xs.len),
+                mem::overwrite(xs.as_mut_slice().unsafe_mut_ref(xs.len),
                                    value.clone());
                 xs.len += 1;
             }
@@ -325,7 +325,7 @@ impl<T:Clone> Clone for Vec<T> {
             let this_slice = self.as_slice();
             while vector.len < len {
                 unsafe {
-                    mem::move_val_init(
+                    mem::overwrite(
                         vector.as_mut_slice().unsafe_mut_ref(vector.len),
                         this_slice.unsafe_ref(vector.len).clone());
                 }
@@ -600,7 +600,7 @@ impl<T> Vec<T> {
 
         unsafe {
             let end = (self.ptr as *T).offset(self.len as int) as *mut T;
-            mem::move_val_init(&mut *end, value);
+            mem::overwrite(&mut *end, value);
             self.len += 1;
         }
     }
@@ -963,7 +963,7 @@ impl<T> Vec<T> {
                 ptr::copy_memory(p.offset(1), &*p, len - index);
                 // Write it in, overwriting the first copy of the `index`th
                 // element.
-                mem::move_val_init(&mut *p, element);
+                mem::overwrite(&mut *p, element);
             }
             self.set_len(len + 1);
         }
@@ -1542,8 +1542,10 @@ impl<T> FromVec<T> for ~[T] {
         unsafe {
             let ret = allocate(size, 8) as *mut RawVec<()>;
 
-            (*ret).fill = len * mem::nonzero_size_of::<T>();
-            (*ret).alloc = len * mem::nonzero_size_of::<T>();
+            let a_size = mem::size_of::<T>();
+            let a_size = if a_size == 0 {1} else {a_size};
+            (*ret).fill = len * a_size;
+            (*ret).alloc = len * a_size;
 
             ptr::copy_nonoverlapping_memory(&mut (*ret).data as *mut _ as *mut u8,
                                             vp as *u8, data_size);