about summary refs log tree commit diff
diff options
context:
space:
mode:
author	Ralf Jung <post@ralfj.de>	2017-07-20 13:20:33 -0700
committer	Ralf Jung <post@ralfj.de>	2017-07-20 13:24:06 -0700
commit	14cb85809b39c4bdaa19d119cded25a8dffe8692 (patch)
tree	310da2b328f48ca4e9964cec7da5eff4b211fd9c
parent	f822ad5c637f0e2d51370a9ea45c3a21f5e2d7dd (diff)
download	rust-14cb85809b39c4bdaa19d119cded25a8dffe8692.tar.gz
	rust-14cb85809b39c4bdaa19d119cded25a8dffe8692.zip
always test alignment in memory.rs
-rw-r--r--	src/memory.rs	41
-rw-r--r--	src/terminator/intrinsic.rs	9
-rw-r--r--	src/terminator/mod.rs	4
-rw-r--r--	tests/compile-fail/unaligned_ptr_cast_zst.rs	6
-rw-r--r--	tests/run-pass-fullmir/vecs.rs	8
5 files changed, 52 insertions, 16 deletions
diff --git a/src/memory.rs b/src/memory.rs
index 71f4c329b5b..481744b11c4 100644
--- a/src/memory.rs
+++ b/src/memory.rs
@@ -194,7 +194,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
         }
 
         let ptr = self.allocate(bytes.len() as u64, 1, Kind::UninitializedStatic)?;
-        self.write_bytes(PrimVal::Ptr(ptr), bytes)?;
+        self.write_bytes(ptr.into(), bytes)?;
         self.mark_static_initalized(ptr.alloc_id, Mutability::Immutable)?;
         self.literal_alloc_cache.insert(bytes.to_vec(), ptr.alloc_id);
         Ok(ptr)
@@ -280,6 +280,7 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
         self.layout.endian
     }
 
+    /// Check that the pointer is aligned and non-NULL
     pub fn check_align(&self, ptr: Pointer, align: u64) -> EvalResult<'tcx> {
         let offset = match ptr.into_inner_primval() {
             PrimVal::Ptr(ptr) => {
@@ -532,13 +533,13 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
 /// Byte accessors
 impl<'a, 'tcx> Memory<'a, 'tcx> {
     fn get_bytes_unchecked(&self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &[u8]> {
-        if size == 0 {
-            return Ok(&[]);
-        }
-        // FIXME: check alignment for zst memory accesses?
+        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
         if self.reads_are_aligned {
             self.check_align(ptr.into(), align)?;
         }
+        if size == 0 {
+            return Ok(&[]);
+        }
         self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
         let alloc = self.get(ptr.alloc_id)?;
         assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -548,13 +549,13 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
     }
 
     fn get_bytes_unchecked_mut(&mut self, ptr: MemoryPointer, size: u64, align: u64) -> EvalResult<'tcx, &mut [u8]> {
-        if size == 0 {
-            return Ok(&mut []);
-        }
-        // FIXME: check alignment for zst memory accesses?
+        // Zero-sized accesses can use dangling pointers, but they still have to be aligned and non-NULL
         if self.writes_are_aligned {
             self.check_align(ptr.into(), align)?;
         }
+        if size == 0 {
+            return Ok(&mut []);
+        }
         self.check_bounds(ptr.offset(size, self.layout)?, true)?; // if ptr.offset is in bounds, then so is ptr (because offset checks for overflow)
         let alloc = self.get_mut(ptr.alloc_id)?;
         assert_eq!(ptr.offset as usize as u64, ptr.offset);
@@ -643,7 +644,13 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
 
     pub fn copy(&mut self, src: Pointer, dest: Pointer, size: u64, align: u64, nonoverlapping: bool) -> EvalResult<'tcx> {
         if size == 0 {
-            // TODO: Should we check for alignment here? (Also see write_bytes intrinsic)
+            // Empty accesses don't need to be valid pointers, but they should still be aligned
+            if self.reads_are_aligned {
+                self.check_align(src, align)?;
+            }
+            if self.writes_are_aligned {
+                self.check_align(dest, align)?;
+            }
             return Ok(());
         }
         let src = src.to_ptr()?;
@@ -695,13 +702,21 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
 
     pub fn read_bytes(&self, ptr: Pointer, size: u64) -> EvalResult<'tcx, &[u8]> {
         if size == 0 {
+            // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+            if self.reads_are_aligned {
+                self.check_align(ptr, 1)?;
+            }
             return Ok(&[]);
         }
         self.get_bytes(ptr.to_ptr()?, size, 1)
     }
 
-    pub fn write_bytes(&mut self, ptr: PrimVal, src: &[u8]) -> EvalResult<'tcx> {
+    pub fn write_bytes(&mut self, ptr: Pointer, src: &[u8]) -> EvalResult<'tcx> {
         if src.is_empty() {
+            // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+            if self.writes_are_aligned {
+                self.check_align(ptr, 1)?;
+            }
             return Ok(());
         }
         let bytes = self.get_bytes_mut(ptr.to_ptr()?, src.len() as u64, 1)?;
@@ -711,6 +726,10 @@ impl<'a, 'tcx> Memory<'a, 'tcx> {
 
     pub fn write_repeat(&mut self, ptr: Pointer, val: u8, count: u64) -> EvalResult<'tcx> {
         if count == 0 {
+            // Empty accesses don't need to be valid pointers, but they should still be non-NULL
+            if self.writes_are_aligned {
+                self.check_align(ptr, 1)?;
+            }
             return Ok(());
         }
         let bytes = self.get_bytes_mut(ptr.to_ptr()?, count, 1)?;
diff --git a/src/terminator/intrinsic.rs b/src/terminator/intrinsic.rs
index da45d7b410a..2be5b7666f0 100644
--- a/src/terminator/intrinsic.rs
+++ b/src/terminator/intrinsic.rs
@@ -143,11 +143,13 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
             "copy_nonoverlapping" => {
                 let elem_ty = substs.type_at(0);
                 let elem_size = self.type_size(elem_ty)?.expect("cannot copy unsized value");
-                if elem_size != 0 {
+                let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
+                if count * elem_size != 0 {
+                    // TODO: We do not even validate alignment for the 0-bytes case.  libstd relies on this in vec::IntoIter::next.
+                    // Also see the write_bytes intrinsic.
                     let elem_align = self.type_align(elem_ty)?;
                     let src = arg_vals[0].into_ptr(&mut self.memory)?;
                     let dest = arg_vals[1].into_ptr(&mut self.memory)?;
-                    let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                     self.memory.copy(src, dest, count * elem_size, elem_align, intrinsic_name.ends_with("_nonoverlapping"))?;
                 }
             }
@@ -465,7 +467,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 let ptr = arg_vals[0].into_ptr(&mut self.memory)?;
                 let count = self.value_to_primval(arg_vals[2], usize)?.to_u64()?;
                 if count > 0 {
-                    // TODO: Should we, at least, validate the alignment? (Also see memory::copy)
+                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
+                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                     self.memory.check_align(ptr, ty_align)?;
                     self.memory.write_repeat(ptr, val_byte, size * count)?;
                 }
diff --git a/src/terminator/mod.rs b/src/terminator/mod.rs
index c4a8d2e73c2..12cec11b723 100644
--- a/src/terminator/mod.rs
+++ b/src/terminator/mod.rs
@@ -813,8 +813,8 @@ impl<'a, 'tcx> EvalContext<'a, 'tcx> {
                 if let Some((name, value)) = new {
                     // +1 for the null terminator
                     let value_copy = self.memory.allocate((value.len() + 1) as u64, 1, Kind::Env)?;
-                    self.memory.write_bytes(PrimVal::Ptr(value_copy), &value)?;
-                    self.memory.write_bytes(PrimVal::Ptr(value_copy.offset(value.len() as u64, self.memory.layout)?), &[0])?;
+                    self.memory.write_bytes(value_copy.into(), &value)?;
+                    self.memory.write_bytes(value_copy.offset(value.len() as u64, self.memory.layout)?.into(), &[0])?;
                     if let Some(var) = self.env_vars.insert(name.to_owned(), value_copy) {
                         self.memory.deallocate(var, None, Kind::Env)?;
                     }
diff --git a/tests/compile-fail/unaligned_ptr_cast_zst.rs b/tests/compile-fail/unaligned_ptr_cast_zst.rs
new file mode 100644
index 00000000000..fc603840684
--- /dev/null
+++ b/tests/compile-fail/unaligned_ptr_cast_zst.rs
@@ -0,0 +1,6 @@
+fn main() {
+    let x = &2u16;
+    let x = x as *const _ as *const [u32; 0];
+    // This must fail because alignment is violated.  Test specifically for loading ZST.
+    let _x = unsafe { *x }; //~ ERROR: tried to access memory with alignment 2, but alignment 4 is required
+}
diff --git a/tests/run-pass-fullmir/vecs.rs b/tests/run-pass-fullmir/vecs.rs
index fd3a00031d6..776791bbc9b 100644
--- a/tests/run-pass-fullmir/vecs.rs
+++ b/tests/run-pass-fullmir/vecs.rs
@@ -24,6 +24,13 @@ fn vec_into_iter() -> u8 {
         .fold(0, |x, y| x + y)
 }
 
+fn vec_into_iter_zst() -> usize {
+    vec![[0u64; 0], [0u64; 0]]
+        .into_iter()
+        .map(|x| x.len())
+        .sum()
+}
+
 fn vec_reallocate() -> Vec<u8> {
     let mut v = vec![1, 2];
     v.push(3);
@@ -35,6 +42,7 @@ fn vec_reallocate() -> Vec<u8> {
 fn main() {
     assert_eq!(vec_reallocate().len(), 5);
     assert_eq!(vec_into_iter(), 30);
+    assert_eq!(vec_into_iter_zst(), 0);
     assert_eq!(make_vec().capacity(), 4);
     assert_eq!(make_vec_macro(), [1, 2]);
     assert_eq!(make_vec_macro_repeat(), [42; 5]);