author     Scott McMurray <scottmcm@users.noreply.github.com>  2023-02-24 18:32:52 -0800
committer  Scott McMurray <scottmcm@users.noreply.github.com>  2023-03-22 15:15:41 -0700
commit     64cce5fc7d2c1070adeaa719932b4bbccf27dd46 (patch)
tree       918975145ee82f7777d722aedea9cc75f218dcce /compiler/rustc_codegen_ssa/src
parent     a266f11990d9544ee408e213e1eec8cc9eb032b7 (diff)
download   rust-64cce5fc7d2c1070adeaa719932b4bbccf27dd46.tar.gz
           rust-64cce5fc7d2c1070adeaa719932b4bbccf27dd46.zip
Add `CastKind::Transmute` to MIR
Updates `interpret`, `codegen_ssa`, and `codegen_cranelift` to consume the new cast instead of the intrinsic.

Includes `CastTransmute` for custom MIR building, so that the extra UB can be tested.
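
For illustration only (not part of this commit): a minimal sketch of how the new cast might be exercised through the custom-MIR builder, assuming the `CastTransmute` helper mentioned above is exposed via `core::intrinsics::mir`; the exact nightly syntax may differ.

```rust
#![feature(custom_mir, core_intrinsics)]
use core::intrinsics::mir::*;

// Builds MIR containing `Rvalue::Cast(CastKind::Transmute, ..)` directly,
// bypassing the `mem::transmute` intrinsic path. Unlike `mem::transmute`,
// this can also express size-mismatched casts, which is what makes the
// "extra UB" testable.
#[custom_mir(dialect = "runtime", phase = "initial")]
pub fn f32_to_bits(x: f32) -> u32 {
    mir!({
        RET = CastTransmute(x);
        Return()
    })
}
```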
Diffstat (limited to 'compiler/rustc_codegen_ssa/src')
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs  | 85
 -rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 60
 2 files changed, 60 insertions(+), 85 deletions(-)
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index bdfc0aa1c30..d412798b324 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -16,7 +16,7 @@ use rustc_index::vec::Idx;
 use rustc_middle::mir::{self, AssertKind, SwitchTargets};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::{self, Instance, Ty, TypeVisitableExt};
+use rustc_middle::ty::{self, Instance, Ty};
 use rustc_session::config::OptLevel;
 use rustc_span::source_map::Span;
 use rustc_span::{sym, Symbol};
@@ -769,23 +769,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             None => bx.fn_abi_of_fn_ptr(sig, extra_args),
         };
 
-        if intrinsic == Some(sym::transmute) {
-            return if let Some(target) = target {
-                self.codegen_transmute(bx, &args[0], destination);
-                helper.funclet_br(self, bx, target, mergeable_succ)
-            } else {
-                // If we are trying to transmute to an uninhabited type,
-                // it is likely there is no allotted destination. In fact,
-                // transmuting to an uninhabited type is UB, which means
-                // we can do what we like. Here, we declare that transmuting
-                // into an uninhabited type is impossible, so anything following
-                // it must be unreachable.
-                assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
-                bx.unreachable();
-                MergingSucc::False
-            };
-        }
-
         if let Some(merging_succ) = self.codegen_panic_intrinsic(
             &helper,
             bx,
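
Editorial note: the deleted branch above handled transmutes whose destination is uninhabited by emitting `unreachable`. A hedged user-level example of the situation it covered (undefined behavior if ever executed); with this commit, the equivalent case is caught by the size/inhabitedness check in the new `codegen_transmute` (second file below), which emits an abort instead.

```rust
use std::convert::Infallible;

// `()` and `Infallible` are both zero-sized, so the intrinsic's size check
// passes, yet no value of `Infallible` can exist: calling this is UB, and
// codegen may treat everything after the transmute as unreachable.
unsafe fn absurd() -> Infallible {
    std::mem::transmute::<(), Infallible>(())
}
```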
@@ -828,7 +811,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match intrinsic {
             None | Some(sym::drop_in_place) => {}
-            Some(sym::copy_nonoverlapping) => unreachable!(),
             Some(intrinsic) => {
                 let dest = match ret_dest {
                     _ if fn_abi.ret.is_indirect() => llargs[0],
@@ -1739,71 +1721,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
-    fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
-        if let Some(index) = dst.as_local() {
-            match self.locals[index] {
-                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
-                LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
-                LocalRef::Operand(None) => {
-                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
-                    assert!(!dst_layout.ty.has_erasable_regions());
-                    let place = PlaceRef::alloca(bx, dst_layout);
-                    place.storage_live(bx);
-                    self.codegen_transmute_into(bx, src, place);
-                    let op = bx.load_operand(place);
-                    place.storage_dead(bx);
-                    self.locals[index] = LocalRef::Operand(Some(op));
-                    self.debug_introduce_local(bx, index);
-                }
-                LocalRef::Operand(Some(op)) => {
-                    assert!(op.layout.is_zst(), "assigning to initialized SSAtemp");
-                }
-            }
-        } else {
-            let dst = self.codegen_place(bx, dst.as_ref());
-            self.codegen_transmute_into(bx, src, dst);
-        }
-    }
-
-    fn codegen_transmute_into(
-        &mut self,
-        bx: &mut Bx,
-        src: &mir::Operand<'tcx>,
-        dst: PlaceRef<'tcx, Bx::Value>,
-    ) {
-        let src = self.codegen_operand(bx, src);
-
-        // Special-case transmutes between scalars as simple bitcasts.
-        match (src.layout.abi, dst.layout.abi) {
-            (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
-                // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
-                let src_is_ptr = matches!(src_scalar.primitive(), abi::Pointer(_));
-                let dst_is_ptr = matches!(dst_scalar.primitive(), abi::Pointer(_));
-                if src_is_ptr == dst_is_ptr {
-                    assert_eq!(src.layout.size, dst.layout.size);
-
-                    // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
-                    // conversions allow handling `bool`s the same as `u8`s.
-                    let src = bx.from_immediate(src.immediate());
-                    // LLVM also doesn't like `bitcast`s between pointers in different address spaces.
-                    let src_as_dst = if src_is_ptr {
-                        bx.pointercast(src, bx.backend_type(dst.layout))
-                    } else {
-                        bx.bitcast(src, bx.backend_type(dst.layout))
-                    };
-                    Immediate(bx.to_immediate_scalar(src_as_dst, dst_scalar)).store(bx, dst);
-                    return;
-                }
-            }
-            _ => {}
-        }
-
-        let llty = bx.backend_type(src.layout);
-        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
-        let align = src.layout.align.abi.min(dst.align);
-        src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
-    }
-
     // Stores the return value of a function call into its final location.
     fn store_return(
         &mut self,
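
Editorial note: the removed `codegen_transmute_into` above special-cased scalar-to-scalar transmutes as plain bitcasts (with the pointer/non-pointer caveats noted in its comments). A small example of user code that took that fast path, assuming both sides have `Scalar` ABI and neither is a pointer:

```rust
// `u32` and `f32` are both 4-byte scalars, so the old code lowered this
// to a single `bitcast` rather than a spill through memory.
pub fn bits_to_float(x: u32) -> f32 {
    unsafe { std::mem::transmute::<u32, f32>(x) }
}
```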
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 13c4fa132d8..72d41d8c32c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::cast::{CastTy, IntTy};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
 use rustc_span::source_map::{Span, DUMMY_SP};
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{self, VariantIdx};
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     #[instrument(level = "trace", skip(self, bx))]
@@ -72,6 +72,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
 
+            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
+                let src = self.codegen_operand(bx, operand);
+                self.codegen_transmute(bx, src, dest);
+            }
+
             mir::Rvalue::Repeat(ref elem, count) => {
                 let cg_elem = self.codegen_operand(bx, elem);
 
@@ -143,6 +148,52 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
+    fn codegen_transmute(
+        &mut self,
+        bx: &mut Bx,
+        src: OperandRef<'tcx, Bx::Value>,
+        dst: PlaceRef<'tcx, Bx::Value>,
+    ) {
+        // The MIR validator enforces no unsized transmutes.
+        debug_assert!(src.layout.is_sized());
+        debug_assert!(dst.layout.is_sized());
+
+        if src.layout.size != dst.layout.size
+            || src.layout.abi == abi::Abi::Uninhabited
+            || dst.layout.abi == abi::Abi::Uninhabited
+        {
+            // In all of these cases it's UB to run this transmute, but that's
+            // known statically so might as well trap for it, rather than just
+            // making it unreachable.
+            bx.abort();
+            return;
+        }
+
+        let size_in_bytes = src.layout.size.bytes();
+        if size_in_bytes == 0 {
+            // Nothing to write
+            return;
+        }
+
+        match src.val {
+            OperandValue::Ref(src_llval, meta, src_align) => {
+                debug_assert_eq!(meta, None);
+                // For a place-to-place transmute, call `memcpy` directly so that
+                // both arguments get the best-available alignment information.
+                let bytes = bx.cx().const_usize(size_in_bytes);
+                let flags = MemFlags::empty();
+                bx.memcpy(dst.llval, dst.align, src_llval, src_align, bytes, flags);
+            }
+            OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
+                // When we have immediate(s), the alignment of the source is irrelevant,
+                // so we can store them using the destination's alignment.
+                let llty = bx.backend_type(src.layout);
+                let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+                src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+            }
+        }
+    }
+
     pub fn codegen_rvalue_unsized(
         &mut self,
         bx: &mut Bx,
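
Editorial note: as a summary of the dispatch in the new `codegen_transmute`, here is a minimal standalone model; the `TransmuteLowering` enum and `classify` function are invented for illustration and are not rustc APIs.

```rust
// Models the four outcomes of `codegen_transmute` above.
#[derive(Debug, PartialEq)]
enum TransmuteLowering {
    Abort,     // size mismatch or an uninhabited side: statically-known UB
    Nop,       // zero-sized: nothing to write
    Memcpy,    // source already in memory: copy with best-known alignments
    StoreCast, // immediate(s): store through a pointer cast to the source type
}

fn classify(
    src_size: u64,
    dst_size: u64,
    src_uninhabited: bool,
    dst_uninhabited: bool,
    src_in_memory: bool,
) -> TransmuteLowering {
    if src_size != dst_size || src_uninhabited || dst_uninhabited {
        TransmuteLowering::Abort
    } else if src_size == 0 {
        TransmuteLowering::Nop
    } else if src_in_memory {
        TransmuteLowering::Memcpy
    } else {
        TransmuteLowering::StoreCast
    }
}

fn main() {
    // A same-size transmute of an SSA immediate (e.g. `u32 -> f32`) is
    // stored through a pointer cast using the destination's alignment.
    assert_eq!(
        classify(4, 4, false, false, false),
        TransmuteLowering::StoreCast
    );
}
```

Because `rvalue_creates_operand` returns `false` for `Transmute` (final hunk below), the cast always flows through `codegen_rvalue` into a place, which is why `codegen_rvalue_operand` can `bug!` on it.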
@@ -344,6 +395,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         };
                         OperandValue::Immediate(newval)
                     }
+                    mir::CastKind::Transmute => {
+                        bug!("Transmute operand {:?} in `codegen_rvalue_operand`", operand);
+                    }
                 };
                 OperandRef { val, layout: cast }
             }
@@ -673,6 +727,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
         match *rvalue {
+            mir::Rvalue::Cast(mir::CastKind::Transmute, ..) =>
+                // FIXME: Now that transmute is an Rvalue, it would be nice if
+                // it could create `Immediate`s for scalars, where possible.
+                false,
             mir::Rvalue::Ref(..) |
             mir::Rvalue::CopyForDeref(..) |
             mir::Rvalue::AddressOf(..) |