author     Scott McMurray <scottmcm@users.noreply.github.com>  2023-02-24 18:32:52 -0800
committer  Scott McMurray <scottmcm@users.noreply.github.com>  2023-03-22 15:15:41 -0700
commit     64cce5fc7d2c1070adeaa719932b4bbccf27dd46 (patch)
tree       918975145ee82f7777d722aedea9cc75f218dcce /compiler
parent     a266f11990d9544ee408e213e1eec8cc9eb032b7 (diff)
Add `CastKind::Transmute` to MIR
Updates `interpret`, `codegen_ssa`, and `codegen_cranelift` to consume the new cast instead of the intrinsic.

Includes `CastTransmute` for custom MIR building, so that the extra UB can be tested.
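
As a rough illustration of what "consume the new cast instead of the intrinsic" means at the MIR level: an ordinary `mem::transmute` call is still built and analyzed as a call terminator, and only the `LowerIntrinsics` pass (see the `rustc_mir_transform/src/lower_intrinsics.rs` hunk below) rewrites it into a `CastKind::Transmute` assignment followed by a goto. A minimal sketch, with the approximate before/after MIR shown as comments rather than actual compiler output:

```rust
use std::mem;

pub fn f32_bits(x: f32) -> u32 {
    // Runtime MIR before this commit (approximate):
    //     _0 = transmute::<f32, u32>(move _1) -> [return: bb1, unwind: ...];
    // After `LowerIntrinsics` with this commit (approximate):
    //     _0 = move _1 as u32 (Transmute);
    //     goto -> bb1;
    unsafe { mem::transmute(x) }
}

fn main() {
    // Same-size transmute, so this is defined behavior.
    assert_eq!(f32_bits(1.0_f32), 0x3f80_0000);
}
```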
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/rustc_borrowck/src/type_check/mod.rs                    |  7
-rw-r--r--  compiler/rustc_codegen_cranelift/src/base.rs                     |  4
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs           | 10
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs                     |  2
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/block.rs                      | 85
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/rvalue.rs                     | 60
-rw-r--r--  compiler/rustc_const_eval/src/interpret/cast.rs                  | 16
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs            |  4
-rw-r--r--  compiler/rustc_const_eval/src/transform/validate.rs              | 27
-rw-r--r--  compiler/rustc_middle/src/mir/mod.rs                             |  3
-rw-r--r--  compiler/rustc_middle/src/mir/syntax.rs                          |  7
-rw-r--r--  compiler/rustc_mir_build/src/build/custom/parse/instruction.rs   |  4
-rw-r--r--  compiler/rustc_mir_transform/src/const_prop.rs                   |  9
-rw-r--r--  compiler/rustc_mir_transform/src/lower_intrinsics.rs             | 26
14 files changed, 163 insertions, 101 deletions
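
The `CastTransmute` helper mentioned in the commit message is declared on the library side and therefore doesn't appear in this compiler-only diffstat; the `rustc_mir_build/src/build/custom/parse/instruction.rs` hunk below is what parses it into `Rvalue::Cast(CastKind::Transmute, ..)`. A minimal sketch of how a test might exercise it — the `custom_mir` dialect/phase strings and the exact call shape here are assumptions, not taken from this diff:

```rust
#![feature(core_intrinsics, custom_mir)]

use core::intrinsics::mir::*;

// Sketch only: builds a body whose single statement is the new
// `CastKind::Transmute` rvalue, bypassing the `transmute` intrinsic
// entirely. The dialect/phase strings are assumed; the cast is only
// valid in runtime MIR, per the validator change in this commit.
#[custom_mir(dialect = "runtime", phase = "initial")]
pub fn bits_via_cast(x: f32) -> u32 {
    mir!({
        RET = CastTransmute(x);
        Return()
    })
}

fn main() {
    // Same-size transmute, so this is defined behavior.
    assert_eq!(bits_via_cast(1.0_f32), 0x3f80_0000);
}
```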
diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs
index f67dae9beb9..c2a426bea09 100644
--- a/compiler/rustc_borrowck/src/type_check/mod.rs
+++ b/compiler/rustc_borrowck/src/type_check/mod.rs
@@ -2222,6 +2222,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
                             }
                         }
                     }
+                    CastKind::Transmute => {
+                        span_mirbug!(
+                            self,
+                            rvalue,
+                            "Unexpected CastKind::Transmute, which is not permitted in Analysis MIR",
+                        );
+                    }
                 }
             }
 
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index 1b8e9312e2f..2107ae147e9 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -709,6 +709,10 @@ fn codegen_stmt<'tcx>(
                     let operand = codegen_operand(fx, operand);
                     operand.coerce_dyn_star(fx, lval);
                 }
+                Rvalue::Cast(CastKind::Transmute, ref operand, _to_ty) => {
+                    let operand = codegen_operand(fx, operand);
+                    lval.write_cvalue_transmute(fx, operand);
+                }
                 Rvalue::Discriminant(place) => {
                     let place = codegen_place(fx, place);
                     let value = place.to_cvalue(fx);
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index fe48cac4faf..03f2a65fcca 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -557,16 +557,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
             fx.bcx.ins().band(ptr, mask);
         }
 
-        sym::transmute => {
-            intrinsic_args!(fx, args => (from); intrinsic);
-
-            if ret.layout().abi.is_uninhabited() {
-                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
-                return;
-            }
-
-            ret.write_cvalue_transmute(fx, from);
-        }
         sym::write_bytes | sym::volatile_set_memory => {
             intrinsic_args!(fx, args => (dst, val, count); intrinsic);
             let val = val.load_scalar(fx);
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 9c921989ca9..012e25884ca 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -378,7 +378,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 }
             }
 
-            _ => bug!("unknown intrinsic '{}'", name),
+            _ => bug!("unknown intrinsic '{}' -- should it have been lowered earlier?", name),
         };
 
         if !fn_abi.ret.is_ignore() {
diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs
index bdfc0aa1c30..d412798b324 100644
--- a/compiler/rustc_codegen_ssa/src/mir/block.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/block.rs
@@ -16,7 +16,7 @@ use rustc_index::vec::Idx;
 use rustc_middle::mir::{self, AssertKind, SwitchTargets};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
-use rustc_middle::ty::{self, Instance, Ty, TypeVisitableExt};
+use rustc_middle::ty::{self, Instance, Ty};
 use rustc_session::config::OptLevel;
 use rustc_span::source_map::Span;
 use rustc_span::{sym, Symbol};
@@ -769,23 +769,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             None => bx.fn_abi_of_fn_ptr(sig, extra_args),
         };
 
-        if intrinsic == Some(sym::transmute) {
-            return if let Some(target) = target {
-                self.codegen_transmute(bx, &args[0], destination);
-                helper.funclet_br(self, bx, target, mergeable_succ)
-            } else {
-                // If we are trying to transmute to an uninhabited type,
-                // it is likely there is no allotted destination. In fact,
-                // transmuting to an uninhabited type is UB, which means
-                // we can do what we like. Here, we declare that transmuting
-                // into an uninhabited type is impossible, so anything following
-                // it must be unreachable.
-                assert_eq!(fn_abi.ret.layout.abi, abi::Abi::Uninhabited);
-                bx.unreachable();
-                MergingSucc::False
-            };
-        }
-
         if let Some(merging_succ) = self.codegen_panic_intrinsic(
             &helper,
             bx,
@@ -828,7 +811,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
         match intrinsic {
             None | Some(sym::drop_in_place) => {}
-            Some(sym::copy_nonoverlapping) => unreachable!(),
             Some(intrinsic) => {
                 let dest = match ret_dest {
                     _ if fn_abi.ret.is_indirect() => llargs[0],
@@ -1739,71 +1721,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
-    fn codegen_transmute(&mut self, bx: &mut Bx, src: &mir::Operand<'tcx>, dst: mir::Place<'tcx>) {
-        if let Some(index) = dst.as_local() {
-            match self.locals[index] {
-                LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
-                LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
-                LocalRef::Operand(None) => {
-                    let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst.as_ref()));
-                    assert!(!dst_layout.ty.has_erasable_regions());
-                    let place = PlaceRef::alloca(bx, dst_layout);
-                    place.storage_live(bx);
-                    self.codegen_transmute_into(bx, src, place);
-                    let op = bx.load_operand(place);
-                    place.storage_dead(bx);
-                    self.locals[index] = LocalRef::Operand(Some(op));
-                    self.debug_introduce_local(bx, index);
-                }
-                LocalRef::Operand(Some(op)) => {
-                    assert!(op.layout.is_zst(), "assigning to initialized SSAtemp");
-                }
-            }
-        } else {
-            let dst = self.codegen_place(bx, dst.as_ref());
-            self.codegen_transmute_into(bx, src, dst);
-        }
-    }
-
-    fn codegen_transmute_into(
-        &mut self,
-        bx: &mut Bx,
-        src: &mir::Operand<'tcx>,
-        dst: PlaceRef<'tcx, Bx::Value>,
-    ) {
-        let src = self.codegen_operand(bx, src);
-
-        // Special-case transmutes between scalars as simple bitcasts.
-        match (src.layout.abi, dst.layout.abi) {
-            (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
-                // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
-                let src_is_ptr = matches!(src_scalar.primitive(), abi::Pointer(_));
-                let dst_is_ptr = matches!(dst_scalar.primitive(), abi::Pointer(_));
-                if src_is_ptr == dst_is_ptr {
-                    assert_eq!(src.layout.size, dst.layout.size);
-
-                    // NOTE(eddyb) the `from_immediate` and `to_immediate_scalar`
-                    // conversions allow handling `bool`s the same as `u8`s.
-                    let src = bx.from_immediate(src.immediate());
-                    // LLVM also doesn't like `bitcast`s between pointers in different address spaces.
-                    let src_as_dst = if src_is_ptr {
-                        bx.pointercast(src, bx.backend_type(dst.layout))
-                    } else {
-                        bx.bitcast(src, bx.backend_type(dst.layout))
-                    };
-                    Immediate(bx.to_immediate_scalar(src_as_dst, dst_scalar)).store(bx, dst);
-                    return;
-                }
-            }
-            _ => {}
-        }
-
-        let llty = bx.backend_type(src.layout);
-        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
-        let align = src.layout.align.abi.min(dst.align);
-        src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, align));
-    }
-
     // Stores the return value of a function call into it's final location.
     fn store_return(
         &mut self,
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 13c4fa132d8..72d41d8c32c 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::cast::{CastTy, IntTy};
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::{self, adjustment::PointerCast, Instance, Ty, TyCtxt};
 use rustc_span::source_map::{Span, DUMMY_SP};
-use rustc_target::abi::VariantIdx;
+use rustc_target::abi::{self, VariantIdx};
 
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     #[instrument(level = "trace", skip(self, bx))]
@@ -72,6 +72,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
 
+            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
+                let src = self.codegen_operand(bx, operand);
+                self.codegen_transmute(bx, src, dest);
+            }
+
             mir::Rvalue::Repeat(ref elem, count) => {
                 let cg_elem = self.codegen_operand(bx, elem);
 
@@ -143,6 +148,52 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
     }
 
+    fn codegen_transmute(
+        &mut self,
+        bx: &mut Bx,
+        src: OperandRef<'tcx, Bx::Value>,
+        dst: PlaceRef<'tcx, Bx::Value>,
+    ) {
+        // The MIR validator enforces no unsized transmutes.
+        debug_assert!(src.layout.is_sized());
+        debug_assert!(dst.layout.is_sized());
+
+        if src.layout.size != dst.layout.size
+            || src.layout.abi == abi::Abi::Uninhabited
+            || dst.layout.abi == abi::Abi::Uninhabited
+        {
+            // In all of these cases it's UB to run this transmute, but that's
+            // known statically so might as well trap for it, rather than just
+            // making it unreachable.
+            bx.abort();
+            return;
+        }
+
+        let size_in_bytes = src.layout.size.bytes();
+        if size_in_bytes == 0 {
+            // Nothing to write
+            return;
+        }
+
+        match src.val {
+            OperandValue::Ref(src_llval, meta, src_align) => {
+                debug_assert_eq!(meta, None);
+                // For a place-to-place transmute, call `memcpy` directly so that
+                // both arguments get the best-available alignment information.
+                let bytes = bx.cx().const_usize(size_in_bytes);
+                let flags = MemFlags::empty();
+                bx.memcpy(dst.llval, dst.align, src_llval, src_align, bytes, flags);
+            }
+            OperandValue::Immediate(_) | OperandValue::Pair(_, _) => {
+                // When we have immediate(s), the alignment of the source is irrelevant,
+                // so we can store them using the destination's alignment.
+                let llty = bx.backend_type(src.layout);
+                let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
+                src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+            }
+        }
+    }
+
     pub fn codegen_rvalue_unsized(
         &mut self,
         bx: &mut Bx,
@@ -344,6 +395,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         };
                         OperandValue::Immediate(newval)
                     }
+                    mir::CastKind::Transmute => {
+                        bug!("Transmute operand {:?} in `codegen_rvalue_operand`", operand);
+                    }
                 };
                 OperandRef { val, layout: cast }
             }
@@ -673,6 +727,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
         match *rvalue {
+            mir::Rvalue::Cast(mir::CastKind::Transmute, ..) =>
+                // FIXME: Now that transmute is an Rvalue, it would be nice if
+                // it could create `Immediate`s for scalars, where possible.
+                false,
             mir::Rvalue::Ref(..) |
             mir::Rvalue::CopyForDeref(..) |
             mir::Rvalue::AddressOf(..) |
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index c14152a916a..163e3f86993 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -133,6 +133,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     bug!()
                 }
             }
+
+            Transmute => {
+                assert!(src.layout.is_sized());
+                assert!(dest.layout.is_sized());
+                if src.layout.size != dest.layout.size {
+                    throw_ub_format!(
+                        "transmuting from {}-byte type to {}-byte type: `{}` -> `{}`",
+                        src.layout.size.bytes(),
+                        dest.layout.size.bytes(),
+                        src.layout.ty,
+                        dest.layout.ty,
+                    );
+                }
+
+                self.copy_op(src, dest, /*allow_transmute*/ true)?;
+            }
         }
         Ok(())
     }
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index a29cdade023..26fb041b455 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -127,7 +127,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // First handle intrinsics without return place.
         let ret = match ret {
             None => match intrinsic_name {
-                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                 sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                 // Unsupported diverging intrinsic.
                 _ => return Ok(false),
@@ -411,9 +410,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 self.exact_div(&val, &size, dest)?;
             }
 
-            sym::transmute => {
-                self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
-            }
             sym::assert_inhabited
             | sym::assert_zero_valid
             | sym::assert_mem_uninitialized_valid => {
diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs
index e0939d1d1ba..66fc1c07e20 100644
--- a/compiler/rustc_const_eval/src/transform/validate.rs
+++ b/compiler/rustc_const_eval/src/transform/validate.rs
@@ -621,6 +621,33 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
                             );
                         }
                     }
+                    CastKind::Transmute => {
+                        if let MirPhase::Runtime(..) = self.mir_phase {
+                            // Unlike `mem::transmute`, a MIR `Transmute` is well-formed
+                            // for any two `Sized` types, just potentially UB to run.
+
+                            if !op_ty.is_sized(self.tcx, self.param_env) {
+                                self.fail(
+                                    location,
+                                    format!("Cannot transmute from non-`Sized` type {op_ty:?}"),
+                                );
+                            }
+                            if !target_type.is_sized(self.tcx, self.param_env) {
+                                self.fail(
+                                    location,
+                                    format!("Cannot transmute to non-`Sized` type {target_type:?}"),
+                                );
+                            }
+                        } else {
+                            self.fail(
+                                location,
+                                format!(
+                                    "Transmute is not supported in non-runtime phase {:?}.",
+                                    self.mir_phase
+                                ),
+                            );
+                        }
+                    }
                 }
             }
             Rvalue::Repeat(_, _)
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 638c082cc84..9c575f6eb9f 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -1967,7 +1967,8 @@ impl<'tcx> Rvalue<'tcx> {
                 | CastKind::PtrToPtr
                 | CastKind::Pointer(_)
                 | CastKind::PointerFromExposedAddress
-                | CastKind::DynStar,
+                | CastKind::DynStar
+                | CastKind::Transmute,
                 _,
                 _,
             )
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index 3a893cdabf6..bbd913d071d 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -1156,6 +1156,13 @@ pub enum CastKind {
     IntToFloat,
     PtrToPtr,
     FnPtrToPtr,
+    /// Reinterpret the bits of the input as a different type.
+    ///
+    /// MIR is well-formed if the input and output types have different sizes,
+    /// but running a transmute between differently-sized types is UB.
+    ///
+    /// Allowed only in [`MirPhase::Runtime`]; Earlier it's a [`TerminatorKind::Call`].
+    Transmute,
 }
 
 #[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
diff --git a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
index adbd37a7cd9..bf58b3090fb 100644
--- a/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
+++ b/compiler/rustc_mir_build/src/build/custom/parse/instruction.rs
@@ -137,6 +137,10 @@ impl<'tcx, 'body> ParseCtxt<'tcx, 'body> {
     fn parse_rvalue(&self, expr_id: ExprId) -> PResult<Rvalue<'tcx>> {
         parse_by_kind!(self, expr_id, expr, "rvalue",
             @call("mir_discriminant", args) => self.parse_place(args[0]).map(Rvalue::Discriminant),
+            @call("mir_cast_transmute", args) => {
+                let source = self.parse_operand(args[0])?;
+                Ok(Rvalue::Cast(CastKind::Transmute, source, expr.ty))
+            },
             @call("mir_checked", args) => {
                 parse_by_kind!(self, args[0], _, "binary op",
                     ExprKind::Binary { op, lhs, rhs } => Ok(Rvalue::CheckedBinaryOp(
diff --git a/compiler/rustc_mir_transform/src/const_prop.rs b/compiler/rustc_mir_transform/src/const_prop.rs
index 8b81abb23b0..c1cf6ee0f9e 100644
--- a/compiler/rustc_mir_transform/src/const_prop.rs
+++ b/compiler/rustc_mir_transform/src/const_prop.rs
@@ -504,6 +504,15 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
 
                 return None;
             }
+            // Do not try creating references, nor any types with potentially-complex
+            // invariants. This avoids an issue where checking validity would do a
+            // bunch of work generating a nice message about the invariant violation,
+            // only to not show it to anyone (since this isn't the lint).
+            Rvalue::Cast(CastKind::Transmute, op, dst_ty) if !dst_ty.is_primitive() => {
+                trace!("skipping Transmute of {:?} to {:?}", op, dst_ty);
+
+                return None;
+            }
 
             // There's no other checking to do at this time.
             Rvalue::Aggregate(..)
diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
index 46eab1184bd..6a7ceb8fef7 100644
--- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs
+++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs
@@ -221,6 +221,32 @@ impl<'tcx> MirPass<'tcx> for LowerIntrinsics {
                             terminator.kind = TerminatorKind::Goto { target };
                         }
                     }
+                    sym::transmute => {
+                        let dst_ty = destination.ty(local_decls, tcx).ty;
+                        let Ok([arg]) = <[_; 1]>::try_from(std::mem::take(args)) else {
+                            span_bug!(
+                                terminator.source_info.span,
+                                "Wrong number of arguments for transmute intrinsic",
+                            );
+                        };
+
+                        // Always emit the cast, even if we transmute to an uninhabited type,
+                        // because that lets CTFE and codegen generate better error messages
+                        // when such a transmute actually ends up reachable.
+                        block.statements.push(Statement {
+                            source_info: terminator.source_info,
+                            kind: StatementKind::Assign(Box::new((
+                                *destination,
+                                Rvalue::Cast(CastKind::Transmute, arg, dst_ty),
+                            ))),
+                        });
+
+                        if let Some(target) = *target {
+                            terminator.kind = TerminatorKind::Goto { target };
+                        } else {
+                            terminator.kind = TerminatorKind::Unreachable;
+                        }
+                    }
                     _ if intrinsic_name.as_str().starts_with("simd_shuffle") => {
                         validate_simd_shuffle(tcx, args, terminator.source_info.span);
                     }