Diffstat (limited to 'compiler')
 compiler/rustc_codegen_ssa/src/mir/intrinsic.rs       |  2 +-
 compiler/rustc_codegen_ssa/src/traits/builder.rs      |  2 +-
 compiler/rustc_const_eval/src/interpret/call.rs       | 14 +-------------
 compiler/rustc_const_eval/src/interpret/intrinsics.rs | 30 +++++++++++++++++++-------
 compiler/rustc_const_eval/src/interpret/place.rs      | 31 +++++--------------------------
 compiler/rustc_hir_analysis/src/check/intrinsic.rs    |  4 +++-
 compiler/rustc_span/src/symbol.rs                     |  2 +-
 7 files changed, 35 insertions(+), 50 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 299b98c0a4f..304ac4544ee 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -75,7 +75,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         // If we're swapping something that's *not* an `OperandValue::Ref`,
         // then we can do it directly and avoid the alloca.
         // Otherwise, we'll let the fallback MIR body take care of it.
-        if let sym::typed_swap = name {
+        if let sym::typed_swap_nonoverlapping = name {
             let pointee_ty = fn_args.type_at(0);
             let pointee_layout = bx.layout_of(pointee_ty);
             if !bx.is_backend_ref(pointee_layout)
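For context, not part of the diff: a minimal sketch of the user code this fast path serves, assuming `std::mem::swap` still lowers to this intrinsic; the concrete types below are illustrative.

```rust
use std::mem;

fn main() {
    // Scalar (Immediate) layout: the backend can swap directly in SSA
    // values, with no alloca.
    let (mut a, mut b) = (1u64, 2u64);
    mem::swap(&mut a, &mut b);
    assert_eq!((a, b), (2, 1));

    // A large aggregate is `OperandValue::Ref` in the backend, so the
    // swap goes through the intrinsic's fallback MIR body instead.
    let (mut x, mut y) = ([0u8; 4096], [1u8; 4096]);
    mem::swap(&mut x, &mut y);
    assert_eq!((x[0], y[0]), (1, 0));
}
```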
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index b0138ac8bfe..3ee13b19f66 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -382,7 +382,7 @@ pub trait BuilderMethods<'a, 'tcx>:
     /// Avoids `alloca`s for Immediates and ScalarPairs.
     ///
     /// FIXME: Maybe do something smarter for Ref types too?
-    /// For now, the `typed_swap` intrinsic just doesn't call this for those
+    /// For now, the `typed_swap_nonoverlapping` intrinsic just doesn't call this for those
     /// cases (in non-debug), preferring the fallback body instead.
     fn typed_place_swap(
         &mut self,
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
index 46720328ea4..99f0ac702c5 100644
--- a/compiler/rustc_const_eval/src/interpret/call.rs
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -883,19 +883,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 .local_to_op(mir::RETURN_PLACE, None)
                 .expect("return place should always be live");
             let dest = self.frame().return_place.clone();
-            let res = if self.stack().len() == 1 {
-                // The initializer of constants and statics will get validated separately
-                // after the constant has been fully evaluated. While we could fall back to the default
-                // code path, that will cause -Zenforce-validity to cycle on static initializers.
-                // Reading from a static's memory is not allowed during its evaluation, and will always
-                // trigger a cycle error. Validation must read from the memory of the current item.
-                // For Miri this means we do not validate the root frame return value,
-                // but Miri anyway calls `read_target_isize` on that so separate validation
-                // is not needed.
-                self.copy_op_no_dest_validation(&op, &dest)
-            } else {
-                self.copy_op_allow_transmute(&op, &dest)
-            };
+            let res = self.copy_op_allow_transmute(&op, &dest);
             trace!("return value: {:?}", self.dump_place(&dest.into()));
             // We delay actually short-circuiting on this error until *after* the stack frame is
             // popped, since we want this error to be attributed to the caller, whose type defines
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index e9eca8814c3..0664a882c1d 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -424,8 +424,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                 self.write_scalar(result, dest)?;
             }
-            sym::typed_swap => {
-                self.typed_swap_intrinsic(&args[0], &args[1])?;
+            sym::typed_swap_nonoverlapping => {
+                self.typed_swap_nonoverlapping_intrinsic(&args[0], &args[1])?;
             }
 
             sym::vtable_size => {
@@ -638,19 +638,35 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     }
 
     /// Does a *typed* swap of `*left` and `*right`.
-    fn typed_swap_intrinsic(
+    fn typed_swap_nonoverlapping_intrinsic(
         &mut self,
         left: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
         right: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
     ) -> InterpResult<'tcx> {
         let left = self.deref_pointer(left)?;
         let right = self.deref_pointer(right)?;
-        debug_assert_eq!(left.layout, right.layout);
+        assert_eq!(left.layout, right.layout);
+        assert!(left.layout.is_sized());
         let kind = MemoryKind::Stack;
         let temp = self.allocate(left.layout, kind)?;
-        self.copy_op(&left, &temp)?;
-        self.copy_op(&right, &left)?;
-        self.copy_op(&temp, &right)?;
+        self.copy_op(&left, &temp)?; // checks alignment of `left`
+
+        // We want to always enforce non-overlapping, even if this is a scalar type.
+        // Therefore we directly use the underlying `mem_copy` here.
+        self.mem_copy(right.ptr(), left.ptr(), left.layout.size, /*nonoverlapping*/ true)?;
+        // This means we also need to do the validation of the value that used to be in `right`
+        // ourselves. This value is now in `left`. The one that started out in `left` already got
+        // validated by the copy above.
+        if M::enforce_validity(self, left.layout) {
+            self.validate_operand(
+                &left.clone().into(),
+                M::enforce_validity_recursively(self, left.layout),
+                /*reset_provenance_and_padding*/ true,
+            )?;
+        }
+
+        self.copy_op(&temp, &right)?; // checks alignment of `right`
+
         self.deallocate_ptr(temp.ptr(), None, kind)?;
         interp_ok(())
     }
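For context, not part of the diff: a minimal sketch of the overlap case this change lets the interpreter reject, using nightly `core_intrinsics`; everything beyond the intrinsic name is illustrative.

```rust
#![feature(core_intrinsics)]
use std::intrinsics::typed_swap_nonoverlapping;

fn main() {
    let mut a = 0u32;
    let mut b = 1u32;
    // Disjoint pointees: well-defined, the values are exchanged.
    unsafe { typed_swap_nonoverlapping(&mut a, &mut b) };
    assert_eq!((a, b), (1, 0));

    // Overlapping pointees (here: identical) are UB. With this change the
    // interpreter reaches `mem_copy(.., /*nonoverlapping*/ true)` and can
    // flag the overlap even for scalar types, instead of silently
    // "succeeding":
    // let p: *mut u32 = &mut a;
    // unsafe { typed_swap_nonoverlapping(p, p) };
}
```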
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 810e9356b26..0d974071619 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -775,31 +775,13 @@ where
 
     /// Copies the data from an operand to a place.
     /// The layouts of the `src` and `dest` may disagree.
-    /// Does not perform validation of the destination.
-    /// The only known use case for this function is checking the return
-    /// value of a static during stack frame popping.
-    #[inline(always)]
-    pub(super) fn copy_op_no_dest_validation(
-        &mut self,
-        src: &impl Projectable<'tcx, M::Provenance>,
-        dest: &impl Writeable<'tcx, M::Provenance>,
-    ) -> InterpResult<'tcx> {
-        self.copy_op_inner(
-            src, dest, /* allow_transmute */ true, /* validate_dest */ false,
-        )
-    }
-
-    /// Copies the data from an operand to a place.
-    /// The layouts of the `src` and `dest` may disagree.
     #[inline(always)]
     pub fn copy_op_allow_transmute(
         &mut self,
         src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        self.copy_op_inner(
-            src, dest, /* allow_transmute */ true, /* validate_dest */ true,
-        )
+        self.copy_op_inner(src, dest, /* allow_transmute */ true)
     }
 
     /// Copies the data from an operand to a place.
@@ -810,9 +792,7 @@ where
         src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
-        self.copy_op_inner(
-            src, dest, /* allow_transmute */ false, /* validate_dest */ true,
-        )
+        self.copy_op_inner(src, dest, /* allow_transmute */ false)
     }
 
     /// Copies the data from an operand to a place.
@@ -824,22 +804,21 @@ where
         src: &impl Projectable<'tcx, M::Provenance>,
         dest: &impl Writeable<'tcx, M::Provenance>,
         allow_transmute: bool,
-        validate_dest: bool,
     ) -> InterpResult<'tcx> {
         // These are technically *two* typed copies: `src` is a not-yet-loaded value,
-        // so we're going a typed copy at `src` type from there to some intermediate storage.
+        // so we're doing a typed copy at `src` type from there to some intermediate storage.
         // And then we're doing a second typed copy from that intermediate storage to `dest`.
         // But as an optimization, we only make a single direct copy here.
 
         // Do the actual copy.
         self.copy_op_no_validate(src, dest, allow_transmute)?;
 
-        if validate_dest && M::enforce_validity(self, dest.layout()) {
+        if M::enforce_validity(self, dest.layout()) {
             let dest = dest.to_place();
             // Given that there were two typed copies, we have to ensure this is valid at both types,
             // and we have to ensure this loses provenance and padding according to both types.
             // But if the types are identical, we only do one pass.
-            if allow_transmute && src.layout().ty != dest.layout().ty {
+            if src.layout().ty != dest.layout().ty {
                 self.validate_operand(
                     &dest.transmute(src.layout(), self)?,
                     M::enforce_validity_recursively(self, src.layout()),
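For context, not part of the diff: the "two typed copies" rule has a user-visible analogue in `mem::transmute_copy`; a hedged illustration of why validity must hold at both types.

```rust
use std::mem;

fn main() {
    let x: u8 = 1;
    // A transmuting copy is a read at the source type (`u8`) plus a write
    // at the destination type (`bool`), so validity must hold for both
    // layouts; that is what the double validation pass above checks.
    let b: bool = unsafe { mem::transmute_copy(&x) };
    assert!(b);
    // With `x = 2` this copy would be UB (2 is a valid u8 but not a valid
    // bool), which `enforce_validity` catches at the destination type.
}
```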
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 427ef141c72..fd78bf3e8fc 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -501,7 +501,9 @@ pub fn check_intrinsic_type(
                 (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit)
             }
 
-            sym::typed_swap => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)); 2], tcx.types.unit),
+            sym::typed_swap_nonoverlapping => {
+                (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)); 2], tcx.types.unit)
+            }
 
             sym::discriminant_value => {
                 let assoc_items = tcx.associated_item_def_ids(
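For context, not part of the diff: my reading of the signature encoded by that tuple, one type parameter, zero const parameters, two `*mut T` arguments, unit return.

```rust
// Sketch of the signature checked above; the real item is a rustc
// intrinsic in `core::intrinsics`, not this stand-in definition.
pub unsafe fn typed_swap_nonoverlapping<T>(_x: *mut T, _y: *mut T) {
    // fallback body elided
}
```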
diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs
index 1b017a07d60..4ecc4201f89 100644
--- a/compiler/rustc_span/src/symbol.rs
+++ b/compiler/rustc_span/src/symbol.rs
@@ -2059,7 +2059,7 @@ symbols! {
         type_macros,
         type_name,
         type_privacy_lints,
-        typed_swap,
+        typed_swap_nonoverlapping,
         u128,
         u128_legacy_const_max,
         u128_legacy_const_min,