author     Ralf Jung <post@ralfj.de>  2019-04-07 19:54:29 +0200
committer  Ralf Jung <post@ralfj.de>  2019-04-07 19:54:29 +0200
commit     944ffbf5b550b4e6e2fa509d59ae1ae5d72d10ea (patch)
tree       e39cd381adc851403089e32ba84c6bf023722a9a
parent     ae1f8ab4aa6775ae589929a0921f794b1d31c161 (diff)
initialize unsized locals when copying to them for the first time
-rw-r--r--  src/librustc_mir/interpret/eval_context.rs | 11
-rw-r--r--  src/librustc_mir/interpret/place.rs        | 79
2 files changed, 57 insertions(+), 33 deletions(-)
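
The commit changes how the Miri engine handles unsized locals, i.e. locals whose size is only known at runtime. For context, this is the kind of code the (unstable) `unsized_locals` feature permits; a minimal sketch, assuming a nightly compiler with the feature gate:

    #![feature(unsized_locals)]

    fn main() {
        // `x` is a local of unsized type `[i32]`; its size is only known at
        // runtime, from the length metadata of the initializer.
        let x: [i32] = *(Box::new([1, 2, 3]) as Box<[i32]>);
        // The interpreter must allocate stack space for `x` lazily, once the
        // first copy into it supplies that metadata.
        assert_eq!(x.len(), 3);
    }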
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index 600d20be397..7c900bd596a 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -702,10 +702,15 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> InterpretCx<'a, 'mir, 'tc
                     LocalValue::Dead => write!(msg, " is dead").unwrap(),
                     LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(),
                     LocalValue::Live(Operand::Indirect(mplace)) => {
-                        let (ptr, align) = mplace.to_scalar_ptr_align();
-                        match ptr {
+                        match mplace.ptr {
                             Scalar::Ptr(ptr) => {
-                                write!(msg, " by align({}) ref:", align.bytes()).unwrap();
+                                write!(msg, " by align({}){} ref:",
+                                    mplace.align.bytes(),
+                                    match mplace.meta {
+                                        Some(meta) => format!(" meta({:?})", meta),
+                                        None => String::new()
+                                    }
+                                ).unwrap();
                                 allocs.push(ptr.alloc_id);
                             }
                             ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
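
The hunk above extends the debug-printing of locals so that a place's metadata is shown next to its alignment. A standalone sketch of the same formatting pattern, with hypothetical values standing in for the interpreter's types:

    fn main() {
        let align_bytes = 8u64;
        let meta: Option<u64> = Some(3); // e.g. a slice length
        let msg = format!(
            " by align({}){} ref:",
            align_bytes,
            match meta {
                Some(m) => format!(" meta({:?})", m),
                None => String::new(),
            }
        );
        assert_eq!(msg, " by align(8) meta(3) ref:");
    }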
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index f69ce4e0d3d..93bef813ba6 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -826,8 +826,6 @@ where
         src: OpTy<'tcx, M::PointerTag>,
         dest: PlaceTy<'tcx, M::PointerTag>,
     ) -> EvalResult<'tcx> {
-        debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
-            "Cannot copy unsized data");
         // We do NOT compare the types for equality, because well-typed code can
         // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
         assert!(src.layout.details == dest.layout.details,
@@ -836,6 +834,7 @@ where
         // Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
         let src = match self.try_read_immediate(src)? {
             Ok(src_val) => {
+                assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
                 // Yay, we got a value that we can write directly.
                 // FIXME: Add a check to make sure that if `src` is indirect,
                 // it does not overlap with `dest`.
@@ -846,13 +845,19 @@ where
         // Slow path, this does not fit into an immediate. Just memcpy.
         trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
 
-        let dest = self.force_allocation(dest)?;
-        let (src_ptr, src_align) = src.to_scalar_ptr_align();
-        let (dest_ptr, dest_align) = dest.to_scalar_ptr_align();
+        // This interprets `src.meta` with the `dest` local's layout, if an unsized local
+        // is being initialized!
+        let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
+        let size = size.unwrap_or_else(|| {
+            assert!(!dest.layout.is_unsized(),
+                "Cannot copy into already initialized unsized place");
+            dest.layout.size
+        });
+        assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
         self.memory.copy(
-            src_ptr, src_align,
-            dest_ptr, dest_align,
-            dest.layout.size,
+            src.ptr, src.align,
+            dest.ptr, dest.align,
+            size,
             /*nonoverlapping*/ true,
         )?;
 
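The slow path now derives the allocation size from `src.meta` when the destination is an uninitialized unsized local. For intuition on what that metadata is: at the language level it is the length (or vtable pointer) carried by a fat pointer, which is exactly what makes `size_of_val` work. A minimal illustration using only the standard library:

    use std::mem;

    fn main() {
        let boxed: Box<[u16]> = Box::new([1, 2, 3, 4]);
        let slice: &[u16] = &boxed;
        // The fat pointer's metadata (len = 4) determines the dynamic size,
        // just as `src.meta` determines the allocation size in the interpreter.
        assert_eq!(mem::size_of_val(slice), 4 * mem::size_of::<u16>());
    }
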
@@ -870,11 +875,13 @@ where
             // Fast path: Just use normal `copy_op`
             return self.copy_op(src, dest);
         }
-        // We still require the sizes to match
-        debug_assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
-            "Cannot copy unsized data");
+        // We still require the sizes to match.
         assert!(src.layout.size == dest.layout.size,
             "Size mismatch when transmuting!\nsrc: {:#?}\ndest: {:#?}", src, dest);
+        // Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
+        // to avoid that here.
+        assert!(!src.layout.is_unsized() && !dest.layout.is_unsized(),
+            "Cannot transmute unsized data");
 
         // The hard case is `ScalarPair`.  `src` is already read from memory in this case,
         // using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
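
Transmuting unsized data is ruled out above because `src.meta` would have to be reinterpreted under `dest.layout`, and the same metadata can describe different byte counts under different layouts. A small illustration of that mismatch (standard library only):

    use std::mem::size_of_val;

    fn main() {
        let a: &[u16] = &[1, 2, 3, 4];
        // A length of 4 means 8 bytes for `[u16]`, but would mean 16 bytes
        // if the same metadata were read with a `[u32]` layout, so the size
        // equality that `copy_op` relies on would silently break.
        assert_eq!(size_of_val(a), 8);
    }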
@@ -902,11 +909,16 @@ where
     /// If the place currently refers to a local that doesn't yet have a matching allocation,
     /// create such an allocation.
     /// This is essentially `force_to_memplace`.
-    pub fn force_allocation(
+    ///
+    /// This supports unsized types and returns the computed size to avoid some
+    /// redundant computation when copying; use `force_allocation` for a simpler, sized-only
+    /// version.
+    pub fn force_allocation_maybe_sized(
         &mut self,
         place: PlaceTy<'tcx, M::PointerTag>,
-    ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        let mplace = match place.place {
+        meta: Option<Scalar<M::PointerTag>>,
+    ) -> EvalResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
+        let (mplace, size) = match place.place {
             Place::Local { frame, local } => {
                 match self.stack[frame].locals[local].access_mut()? {
                     Ok(local_val) => {
@@ -926,28 +938,41 @@ where
                         // We need the layout of the local.  We can NOT use the layout we got,
                         // that might e.g., be an inner field of a struct with `Scalar` layout,
                         // that has different alignment than the outer field.
+                        // We also need to support unsized types, and hence cannot use `allocate`.
                         let local_layout = self.layout_of_local(&self.stack[frame], local, None)?;
-                        let ptr = self.allocate(local_layout, MemoryKind::Stack);
+                        let (size, align) = self.size_and_align_of(meta, local_layout)?
+                            .expect("Cannot allocate for non-dyn-sized type");
+                        let ptr = self.memory.allocate(size, align, MemoryKind::Stack);
+                        let ptr = M::tag_new_allocation(self, ptr, MemoryKind::Stack);
+                        let mplace = MemPlace { ptr: ptr.into(), align, meta };
                         if let Some(value) = old_val {
                             // Preserve old value.
                             // We don't have to validate as we can assume the local
                             // was already valid for its type.
-                            self.write_immediate_to_mplace_no_validate(value, ptr)?;
+                            let mplace = MPlaceTy { mplace, layout: local_layout };
+                            self.write_immediate_to_mplace_no_validate(value, mplace)?;
                         }
-                        let mplace = ptr.mplace;
                         // Now we can call `access_mut` again, asserting it goes well,
                         // and actually overwrite things.
                         *self.stack[frame].locals[local].access_mut().unwrap().unwrap() =
                             LocalValue::Live(Operand::Indirect(mplace));
-                        mplace
+                        (mplace, Some(size))
                     }
-                    Err(mplace) => mplace, // this already was an indirect local
+                    Err(mplace) => (mplace, None), // this already was an indirect local
                 }
             }
-            Place::Ptr(mplace) => mplace
+            Place::Ptr(mplace) => (mplace, None)
         };
         // Return with the original layout, so that the caller can go on
-        Ok(MPlaceTy { mplace, layout: place.layout })
+        Ok((MPlaceTy { mplace, layout: place.layout }, size))
+    }
+
+    #[inline(always)]
+    pub fn force_allocation(
+        &mut self,
+        place: PlaceTy<'tcx, M::PointerTag>,
+    ) -> EvalResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        Ok(self.force_allocation_maybe_sized(place, None)?.0)
     }
 
     pub fn allocate(
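
Inside `force_allocation_maybe_sized`, a dyn-sized local is now backed by real memory whose size and alignment are computed from the metadata at runtime. A hedged sketch of that allocate-with-runtime-size step using std's `Layout` (the interpreter uses its own `memory.allocate`, not the global allocator):

    use std::alloc::{alloc, dealloc, Layout};

    fn main() {
        // What `size_and_align_of(meta, layout)` might yield for a `[u32]`
        // local of length 5: 20 bytes, 4-byte aligned (illustrative values).
        let layout = Layout::from_size_align(5 * 4, 4).unwrap();
        // Allocate only now that the metadata has made the size known...
        let ptr = unsafe { alloc(layout) };
        assert!(!ptr.is_null());
        // ...and release it again; in the interpreter the allocation instead
        // lives in the frame's stack memory until the frame is popped.
        unsafe { dealloc(ptr, layout) };
    }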
@@ -955,15 +980,9 @@ where
         layout: TyLayout<'tcx>,
         kind: MemoryKind<M::MemoryKinds>,
     ) -> MPlaceTy<'tcx, M::PointerTag> {
-        if layout.is_unsized() {
-            assert!(self.tcx.features().unsized_locals, "cannot alloc memory for unsized type");
-            // FIXME: What should we do here? We should definitely also tag!
-            MPlaceTy::dangling(layout, self)
-        } else {
-            let ptr = self.memory.allocate(layout.size, layout.align.abi, kind);
-            let ptr = M::tag_new_allocation(self, ptr, kind);
-            MPlaceTy::from_aligned_ptr(ptr, layout)
-        }
+        let ptr = self.memory.allocate(layout.size, layout.align.abi, kind);
+        let ptr = M::tag_new_allocation(self, ptr, kind);
+        MPlaceTy::from_aligned_ptr(ptr, layout)
     }
 
     pub fn write_discriminant_index(
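
With the dangling-place fallback removed, `allocate` unconditionally requires a sized layout; unsized locals go through `force_allocation_maybe_sized` instead. The invariant mirrors the standard library, where a statically known layout only exists for `Sized` types; a brief illustration:

    use std::alloc::Layout;

    fn main() {
        // Fine: `u64` is `Sized`, so its size is statically known.
        assert_eq!(Layout::new::<u64>().size(), 8);
        // `Layout::new::<[u64]>()` would not compile: `[u64]` has no static
        // size, which is why the interpreter's `allocate` now insists on
        // sized layouts and defers unsized ones to the metadata-driven path.
    }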