Diffstat (limited to 'compiler/rustc_codegen_cranelift/src'):
 compiler/rustc_codegen_cranelift/src/abi/mod.rs             |  13
 compiler/rustc_codegen_cranelift/src/base.rs                |  20
 compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs |   2
 compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs       |   2
 compiler/rustc_codegen_cranelift/src/driver/jit.rs          |   4
 compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs | 274
 compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs      |   3
 compiler/rustc_codegen_cranelift/src/lib.rs                 |   2
 compiler/rustc_codegen_cranelift/src/value_and_place.rs     | 103
 9 files changed, 352 insertions(+), 71 deletions(-)
diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
index eddb479073c..2c038f22ca9 100644
--- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs
@@ -445,9 +445,14 @@ pub(crate) fn codegen_terminator_call<'tcx>(
 
     // Unpack arguments tuple for closures
     let mut args = if fn_sig.abi() == Abi::RustCall {
-        assert_eq!(args.len(), 2, "rust-call abi requires two arguments");
-        let self_arg = codegen_call_argument_operand(fx, &args[0]);
-        let pack_arg = codegen_call_argument_operand(fx, &args[1]);
+        let (self_arg, pack_arg) = match args {
+            [pack_arg] => (None, codegen_call_argument_operand(fx, pack_arg)),
+            [self_arg, pack_arg] => (
+                Some(codegen_call_argument_operand(fx, self_arg)),
+                codegen_call_argument_operand(fx, pack_arg),
+            ),
+            _ => panic!("rust-call abi requires one or two arguments"),
+        };
 
         let tupled_arguments = match pack_arg.value.layout().ty.kind() {
             ty::Tuple(ref tupled_arguments) => tupled_arguments,
@@ -455,7 +460,7 @@ pub(crate) fn codegen_terminator_call<'tcx>(
         };
 
         let mut args = Vec::with_capacity(1 + tupled_arguments.len());
-        args.push(self_arg);
+        args.extend(self_arg);
         for i in 0..tupled_arguments.len() {
             args.push(CallArgument {
                 value: pack_arg.value.value_field(fx, FieldIdx::new(i)),
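The slice-pattern rewrite above works because `Vec::extend` accepts any `IntoIterator`, and an `Option<T>` yields zero or one items, so the missing-`self` case needs no separate branch when building the argument list. A standalone sketch of the idiom, using a hypothetical string-based argument type rather than the real `CallArgument`:

fn unpack_rust_call_args(args: &[&str]) -> Vec<String> {
    let (self_arg, pack_arg) = match args {
        [pack] => (None, pack.to_string()),
        [this, pack] => (Some(this.to_string()), pack.to_string()),
        _ => panic!("rust-call abi requires one or two arguments"),
    };
    let mut out = Vec::with_capacity(2);
    out.extend(self_arg); // pushes zero elements for None, one for Some
    out.push(pack_arg);
    out
}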
diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs
index e05f2146f0c..522dd7189fe 100644
--- a/compiler/rustc_codegen_cranelift/src/base.rs
+++ b/compiler/rustc_codegen_cranelift/src/base.rs
@@ -7,6 +7,8 @@ use rustc_middle::ty::layout::FnAbiOf;
 use rustc_middle::ty::print::with_no_trimmed_paths;
 
 use cranelift_codegen::ir::UserFuncName;
+use cranelift_codegen::CodegenError;
+use cranelift_module::ModuleError;
 
 use crate::constant::ConstantCx;
 use crate::debuginfo::FunctionDebugContext;
@@ -172,7 +174,21 @@ pub(crate) fn compile_fn(
     // Define function
     cx.profiler.generic_activity("define function").run(|| {
         context.want_disasm = cx.should_write_ir;
-        module.define_function(codegened_func.func_id, context).unwrap();
+        match module.define_function(codegened_func.func_id, context) {
+            Ok(()) => {}
+            Err(ModuleError::Compilation(CodegenError::ImplLimitExceeded)) => {
+                let handler = rustc_session::EarlyErrorHandler::new(
+                    rustc_session::config::ErrorOutputType::default(),
+                );
+                handler.early_error(format!(
+                    "backend implementation limit exceeded while compiling {name}",
+                    name = codegened_func.symbol_name
+                ));
+            }
+            Err(err) => {
+                panic!("Error while defining {name}: {err:?}", name = codegened_func.symbol_name);
+            }
+        }
     });
 
     if cx.should_write_ir {
@@ -356,7 +372,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
 
                         codegen_panic_inner(
                             fx,
-                            rustc_hir::LangItem::PanicBoundsCheck,
+                            rustc_hir::LangItem::PanicMisalignedPointerDereference,
                             &[required, found, location],
                             source_info.span,
                         );
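The `define_function` change above singles out the one recoverable failure (`ImplLimitExceeded`) for a clean user-facing error and keeps the panic for everything else, which would indicate a backend bug. A self-contained sketch of the same pattern with stand-in types, not the real cranelift-module or rustc_session APIs:

enum DefineError {
    ImplLimitExceeded,
    Other(String),
}

fn handle_define_result(symbol_name: &str, res: Result<(), DefineError>) {
    match res {
        Ok(()) => {}
        Err(DefineError::ImplLimitExceeded) => {
            // Known backend limitation: report it cleanly instead of panicking.
            eprintln!("error: backend implementation limit exceeded while compiling {symbol_name}");
            std::process::exit(1);
        }
        Err(DefineError::Other(err)) => {
            // Anything else is a compiler bug, so the panic stays.
            panic!("Error while defining {symbol_name}: {err}");
        }
    }
}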
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
index 1b454b6667c..50bc7a127af 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/line_info.rs
@@ -165,7 +165,7 @@ impl FunctionDebugContext {
         for &MachSrcLoc { start, end, loc } in mcr.buffer.get_srclocs_sorted() {
             debug_context.dwarf.unit.line_program.row().address_offset = u64::from(start);
             if !loc.is_default() {
-                let source_loc = *self.source_loc_set.get_index(loc.bits() as usize).unwrap();
+                let source_loc = self.source_loc_set[loc.bits() as usize];
                 create_row_for_span(debug_context, source_loc);
             } else {
                 create_row_for_span(debug_context, self.function_source_loc);
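The simplification above relies on `indexmap::IndexSet` implementing `Index<usize>`, so `set[i]` panics on an out-of-range index exactly as the previous `set.get_index(i).unwrap()` did. A minimal illustration, assuming the `indexmap` crate is available:

use indexmap::IndexSet;

fn main() {
    let mut source_locs: IndexSet<(u32, u64, u64)> = IndexSet::new();
    source_locs.insert((0, 3, 7));
    // Indexing and get_index(..).unwrap() agree; both panic when out of range.
    assert_eq!(source_locs[0], *source_locs.get_index(0).unwrap());
}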
diff --git a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
index 3a7421d8b30..8a4b1cccf14 100644
--- a/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/debuginfo/mod.rs
@@ -38,7 +38,7 @@ pub(crate) struct DebugContext {
 pub(crate) struct FunctionDebugContext {
     entry_id: UnitEntryId,
     function_source_loc: (FileId, u64, u64),
-    source_loc_set: indexmap::IndexSet<(FileId, u64, u64)>,
+    source_loc_set: IndexSet<(FileId, u64, u64)>,
 }
 
 impl DebugContext {
diff --git a/compiler/rustc_codegen_cranelift/src/driver/jit.rs b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
index 41e24acefbe..3ea38842148 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/jit.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/jit.rs
@@ -114,9 +114,9 @@ pub(crate) fn run_jit(tcx: TyCtxt<'_>, backend_config: BackendConfig) -> ! {
         .iter()
         .map(|cgu| cgu.items_in_deterministic_order(tcx).into_iter())
         .flatten()
-        .collect::<FxHashMap<_, (_, _)>>()
+        .collect::<FxHashMap<_, _>>()
         .into_iter()
-        .collect::<Vec<(_, (_, _))>>();
+        .collect::<Vec<(_, _)>>();
 
     tcx.sess.time("codegen mono items", || {
         super::predefine_mono_items(tcx, &mut jit_module, &mono_items);
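Round-tripping through a hash map is a dedup-by-key idiom: collecting key-value pairs keeps one entry per key, and the simplified turbofish lets the tuple shape be inferred. A sketch with std's `HashMap` standing in for rustc's `FxHashMap`, which exposes the same interface:

use std::collections::HashMap;

fn main() {
    let mono_items = vec![("item_a", 1), ("item_b", 2), ("item_a", 3)];
    let deduped = mono_items
        .into_iter()
        .collect::<HashMap<_, _>>()
        .into_iter()
        .collect::<Vec<(_, _)>>();
    // One entry per key; for duplicate keys the later value wins.
    assert_eq!(deduped.len(), 2);
}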
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
index 18162fb5ab2..fdd27a454e0 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/llvm_x86.rs
@@ -18,6 +18,20 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
             // Spin loop hint
         }
 
+        // Used by is_x86_feature_detected!()
+        "llvm.x86.xgetbv" => {
+            // FIXME use the actual xgetbv instruction
+            intrinsic_args!(fx, args => (v); intrinsic);
+
+            let v = v.load_scalar(fx);
+
+            // As of writing, only XCR0 exists
+            fx.bcx.ins().trapnz(v, TrapCode::UnreachableCodeReached);
+
+            let res = fx.bcx.ins().iconst(types::I64, 1 /* bit 0 must be set */);
+            ret.write_cvalue(fx, CValue::by_val(res, fx.layout_of(fx.tcx.types.i64)));
+        }
+
         // Used by `_mm_movemask_epi8` and `_mm256_movemask_epi8`
         "llvm.x86.sse2.pmovmskb.128"
         | "llvm.x86.avx2.pmovmskb"
@@ -53,7 +67,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
             let res = CValue::by_val(res, fx.layout_of(fx.tcx.types.i32));
             ret.write_cvalue(fx, res);
         }
-        "llvm.x86.sse2.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
+        "llvm.x86.sse.cmp.ps" | "llvm.x86.sse2.cmp.pd" => {
             let (x, y, kind) = match args {
                 [x, y, kind] => (x, y, kind),
                 _ => bug!("wrong number of args for intrinsic {intrinsic}"),
@@ -66,18 +80,95 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
             let flt_cc = match kind
                 .try_to_bits(Size::from_bytes(1))
                 .unwrap_or_else(|| panic!("kind not scalar: {:?}", kind))
+                .try_into()
+                .unwrap()
             {
-                0 => FloatCC::Equal,
-                1 => FloatCC::LessThan,
-                2 => FloatCC::LessThanOrEqual,
-                7 => FloatCC::Ordered,
-                3 => FloatCC::Unordered,
-                4 => FloatCC::NotEqual,
-                5 => FloatCC::UnorderedOrGreaterThanOrEqual,
-                6 => FloatCC::UnorderedOrGreaterThan,
+                _CMP_EQ_OQ | _CMP_EQ_OS => FloatCC::Equal,
+                _CMP_LT_OS | _CMP_LT_OQ => FloatCC::LessThan,
+                _CMP_LE_OS | _CMP_LE_OQ => FloatCC::LessThanOrEqual,
+                _CMP_UNORD_Q | _CMP_UNORD_S => FloatCC::Unordered,
+                _CMP_NEQ_UQ | _CMP_NEQ_US => FloatCC::NotEqual,
+                _CMP_NLT_US | _CMP_NLT_UQ => FloatCC::UnorderedOrGreaterThanOrEqual,
+                _CMP_NLE_US | _CMP_NLE_UQ => FloatCC::UnorderedOrGreaterThan,
+                _CMP_ORD_Q | _CMP_ORD_S => FloatCC::Ordered,
+                _CMP_EQ_UQ | _CMP_EQ_US => FloatCC::UnorderedOrEqual,
+                _CMP_NGE_US | _CMP_NGE_UQ => FloatCC::UnorderedOrLessThan,
+                _CMP_NGT_US | _CMP_NGT_UQ => FloatCC::UnorderedOrLessThanOrEqual,
+                _CMP_FALSE_OQ | _CMP_FALSE_OS => todo!(),
+                _CMP_NEQ_OQ | _CMP_NEQ_OS => FloatCC::OrderedNotEqual,
+                _CMP_GE_OS | _CMP_GE_OQ => FloatCC::GreaterThanOrEqual,
+                _CMP_GT_OS | _CMP_GT_OQ => FloatCC::GreaterThan,
+                _CMP_TRUE_UQ | _CMP_TRUE_US => todo!(),
+
                 kind => unreachable!("kind {:?}", kind),
             };
 
+            // Copied from stdarch
+            /// Equal (ordered, non-signaling)
+            const _CMP_EQ_OQ: i32 = 0x00;
+            /// Less-than (ordered, signaling)
+            const _CMP_LT_OS: i32 = 0x01;
+            /// Less-than-or-equal (ordered, signaling)
+            const _CMP_LE_OS: i32 = 0x02;
+            /// Unordered (non-signaling)
+            const _CMP_UNORD_Q: i32 = 0x03;
+            /// Not-equal (unordered, non-signaling)
+            const _CMP_NEQ_UQ: i32 = 0x04;
+            /// Not-less-than (unordered, signaling)
+            const _CMP_NLT_US: i32 = 0x05;
+            /// Not-less-than-or-equal (unordered, signaling)
+            const _CMP_NLE_US: i32 = 0x06;
+            /// Ordered (non-signaling)
+            const _CMP_ORD_Q: i32 = 0x07;
+            /// Equal (unordered, non-signaling)
+            const _CMP_EQ_UQ: i32 = 0x08;
+            /// Not-greater-than-or-equal (unordered, signaling)
+            const _CMP_NGE_US: i32 = 0x09;
+            /// Not-greater-than (unordered, signaling)
+            const _CMP_NGT_US: i32 = 0x0a;
+            /// False (ordered, non-signaling)
+            const _CMP_FALSE_OQ: i32 = 0x0b;
+            /// Not-equal (ordered, non-signaling)
+            const _CMP_NEQ_OQ: i32 = 0x0c;
+            /// Greater-than-or-equal (ordered, signaling)
+            const _CMP_GE_OS: i32 = 0x0d;
+            /// Greater-than (ordered, signaling)
+            const _CMP_GT_OS: i32 = 0x0e;
+            /// True (unordered, non-signaling)
+            const _CMP_TRUE_UQ: i32 = 0x0f;
+            /// Equal (ordered, signaling)
+            const _CMP_EQ_OS: i32 = 0x10;
+            /// Less-than (ordered, non-signaling)
+            const _CMP_LT_OQ: i32 = 0x11;
+            /// Less-than-or-equal (ordered, non-signaling)
+            const _CMP_LE_OQ: i32 = 0x12;
+            /// Unordered (signaling)
+            const _CMP_UNORD_S: i32 = 0x13;
+            /// Not-equal (unordered, signaling)
+            const _CMP_NEQ_US: i32 = 0x14;
+            /// Not-less-than (unordered, non-signaling)
+            const _CMP_NLT_UQ: i32 = 0x15;
+            /// Not-less-than-or-equal (unordered, non-signaling)
+            const _CMP_NLE_UQ: i32 = 0x16;
+            /// Ordered (signaling)
+            const _CMP_ORD_S: i32 = 0x17;
+            /// Equal (unordered, signaling)
+            const _CMP_EQ_US: i32 = 0x18;
+            /// Not-greater-than-or-equal (unordered, non-signaling)
+            const _CMP_NGE_UQ: i32 = 0x19;
+            /// Not-greater-than (unordered, non-signaling)
+            const _CMP_NGT_UQ: i32 = 0x1a;
+            /// False (ordered, signaling)
+            const _CMP_FALSE_OS: i32 = 0x1b;
+            /// Not-equal (ordered, signaling)
+            const _CMP_NEQ_OS: i32 = 0x1c;
+            /// Greater-than-or-equal (ordered, non-signaling)
+            const _CMP_GE_OQ: i32 = 0x1d;
+            /// Greater-than (ordered, non-signaling)
+            const _CMP_GT_OQ: i32 = 0x1e;
+            /// True (unordered, signaling)
+            const _CMP_TRUE_US: i32 = 0x1f;
+
             simd_pair_for_each_lane(fx, x, y, ret, &|fx, lane_ty, res_lane_ty, x_lane, y_lane| {
                 let res_lane = match lane_ty.kind() {
                     ty::Float(_) => fx.bcx.ins().fcmp(flt_cc, x_lane, y_lane),
@@ -103,6 +194,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                 _ => fx.bcx.ins().iconst(types::I32, 0),
             });
         }
+        "llvm.x86.sse2.psrai.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
         "llvm.x86.sse2.pslli.d" => {
             let (a, imm8) = match args {
                 [a, imm8] => (a, imm8),
@@ -137,6 +245,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                 _ => fx.bcx.ins().iconst(types::I32, 0),
             });
         }
+        "llvm.x86.sse2.psrai.w" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.sse2.psrai.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
         "llvm.x86.sse2.pslli.w" => {
             let (a, imm8) = match args {
                 [a, imm8] => (a, imm8),
@@ -171,6 +296,57 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                 _ => fx.bcx.ins().iconst(types::I32, 0),
             });
         }
+        "llvm.x86.avx.psrai.d" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.avx.psrai.d imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 32 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
+        "llvm.x86.sse2.psrli.q" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.avx.psrli.q imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 64 => fx.bcx.ins().ushr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
+        "llvm.x86.sse2.pslli.q" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.avx.pslli.q imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 64 => fx.bcx.ins().ishl_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
         "llvm.x86.avx.pslli.d" => {
             let (a, imm8) = match args {
                 [a, imm8] => (a, imm8),
@@ -205,6 +381,23 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
                 _ => fx.bcx.ins().iconst(types::I32, 0),
             });
         }
+        "llvm.x86.avx2.psrai.w" => {
+            let (a, imm8) = match args {
+                [a, imm8] => (a, imm8),
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+            let imm8 = crate::constant::mir_operand_get_const_val(fx, imm8)
+                .expect("llvm.x86.avx.psrai.w imm8 not const");
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| match imm8
+                .try_to_bits(Size::from_bytes(4))
+                .unwrap_or_else(|| panic!("imm8 not scalar: {:?}", imm8))
+            {
+                imm8 if imm8 < 16 => fx.bcx.ins().sshr_imm(lane, i64::from(imm8 as u8)),
+                _ => fx.bcx.ins().iconst(types::I32, 0),
+            });
+        }
         "llvm.x86.avx2.pslli.w" => {
             let (a, imm8) = match args {
                 [a, imm8] => (a, imm8),
@@ -313,7 +506,7 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
             ret.place_lane(fx, 2).to_ptr().store(fx, res_2, MemFlags::trusted());
             ret.place_lane(fx, 3).to_ptr().store(fx, res_3, MemFlags::trusted());
         }
-        "llvm.x86.sse2.storeu.dq" => {
+        "llvm.x86.sse2.storeu.dq" | "llvm.x86.sse2.storeu.pd" => {
             intrinsic_args!(fx, args => (mem_addr, a); intrinsic);
             let mem_addr = mem_addr.load_scalar(fx);
 
@@ -321,17 +514,45 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
             let dest = CPlace::for_ptr(Pointer::new(mem_addr), a.layout());
             dest.write_cvalue(fx, a);
         }
-        "llvm.x86.addcarry.64" => {
+        "llvm.x86.ssse3.pabs.b.128" | "llvm.x86.ssse3.pabs.w.128" | "llvm.x86.ssse3.pabs.d.128" => {
+            let a = match args {
+                [a] => a,
+                _ => bug!("wrong number of args for intrinsic {intrinsic}"),
+            };
+            let a = codegen_operand(fx, a);
+
+            simd_for_each_lane(fx, a, ret, &|fx, _lane_ty, _res_lane_ty, lane| {
+                fx.bcx.ins().iabs(lane)
+            });
+        }
+        "llvm.x86.addcarry.32" | "llvm.x86.addcarry.64" => {
             intrinsic_args!(fx, args => (c_in, a, b); intrinsic);
             let c_in = c_in.load_scalar(fx);
 
-            llvm_add_sub(fx, BinOp::Add, ret, c_in, a, b);
+            let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+            let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+            let val = CValue::by_val_pair(cb_out, c, layout);
+            ret.write_cvalue(fx, val);
         }
-        "llvm.x86.subborrow.64" => {
+        "llvm.x86.addcarryx.u32" | "llvm.x86.addcarryx.u64" => {
+            intrinsic_args!(fx, args => (c_in, a, b, out); intrinsic);
+            let c_in = c_in.load_scalar(fx);
+
+            let (cb_out, c) = llvm_add_sub(fx, BinOp::Add, c_in, a, b);
+
+            Pointer::new(out.load_scalar(fx)).store(fx, c, MemFlags::trusted());
+            ret.write_cvalue(fx, CValue::by_val(cb_out, fx.layout_of(fx.tcx.types.u8)));
+        }
+        "llvm.x86.subborrow.32" | "llvm.x86.subborrow.64" => {
             intrinsic_args!(fx, args => (b_in, a, b); intrinsic);
             let b_in = b_in.load_scalar(fx);
 
-            llvm_add_sub(fx, BinOp::Sub, ret, b_in, a, b);
+            let (cb_out, c) = llvm_add_sub(fx, BinOp::Sub, b_in, a, b);
+
+            let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, a.layout().ty]));
+            let val = CValue::by_val_pair(cb_out, c, layout);
+            ret.write_cvalue(fx, val);
         }
         _ => {
             fx.tcx
@@ -356,21 +577,11 @@ pub(crate) fn codegen_x86_llvm_intrinsic_call<'tcx>(
 fn llvm_add_sub<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     bin_op: BinOp,
-    ret: CPlace<'tcx>,
     cb_in: Value,
     a: CValue<'tcx>,
     b: CValue<'tcx>,
-) {
-    assert_eq!(
-        a.layout().ty,
-        fx.tcx.types.u64,
-        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 second operand must be u64"
-    );
-    assert_eq!(
-        b.layout().ty,
-        fx.tcx.types.u64,
-        "llvm.x86.addcarry.64/llvm.x86.subborrow.64 third operand must be u64"
-    );
+) -> (Value, Value) {
+    assert_eq!(a.layout().ty, b.layout().ty);
 
     // c + carry -> c + first intermediate carry or borrow respectively
     let int0 = crate::num::codegen_checked_int_binop(fx, bin_op, a, b);
@@ -378,15 +589,14 @@ fn llvm_add_sub<'tcx>(
     let cb0 = int0.value_field(fx, FieldIdx::new(1)).load_scalar(fx);
 
     // c + carry -> c + second intermediate carry or borrow respectively
-    let cb_in_as_u64 = fx.bcx.ins().uextend(types::I64, cb_in);
-    let cb_in_as_u64 = CValue::by_val(cb_in_as_u64, fx.layout_of(fx.tcx.types.u64));
-    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_u64);
+    let clif_ty = fx.clif_type(a.layout().ty).unwrap();
+    let cb_in_as_int = fx.bcx.ins().uextend(clif_ty, cb_in);
+    let cb_in_as_int = CValue::by_val(cb_in_as_int, fx.layout_of(a.layout().ty));
+    let int1 = crate::num::codegen_checked_int_binop(fx, bin_op, c, cb_in_as_int);
     let (c, cb1) = int1.load_scalar_pair(fx);
 
     // carry0 | carry1 -> carry or borrow respectively
     let cb_out = fx.bcx.ins().bor(cb0, cb1);
 
-    let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[fx.tcx.types.u8, fx.tcx.types.u64]));
-    let val = CValue::by_val_pair(cb_out, c, layout);
-    ret.write_cvalue(fx, val);
+    (cb_out, c)
 }
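The reworked `llvm_add_sub` chains two checked operations and ORs the two carry (or borrow) flags, which matches the addcarry/subborrow semantics at any operand width; returning the pair lets the `addcarry`, `addcarryx`, and `subborrow` callers each deliver the results their own way. A plain-integer sketch of the add case for `u64`:

fn add_carry_u64(c_in: u8, a: u64, b: u64) -> (u8, u64) {
    // a + b -> first intermediate sum and carry
    let (sum0, carry0) = a.overflowing_add(b);
    // sum + carry-in -> second intermediate sum and carry
    let (sum1, carry1) = sum0.overflowing_add(u64::from(c_in));
    // At most one of the two steps can overflow, so OR the flags.
    ((carry0 | carry1) as u8, sum1)
}

fn main() {
    assert_eq!(add_carry_u64(1, u64::MAX, 0), (1, 0));
    assert_eq!(add_carry_u64(0, 2, 3), (0, 5));
}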
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index da8ab361331..e3006b253b7 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -647,12 +647,13 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
             ret.write_cvalue(fx, val);
         }
-        sym::volatile_store | sym::unaligned_volatile_store => {
+        sym::volatile_store | sym::unaligned_volatile_store | sym::nontemporal_store => {
             intrinsic_args!(fx, args => (ptr, val); intrinsic);
             let ptr = ptr.load_scalar(fx);
 
             // Cranelift treats stores as volatile by default
             // FIXME correctly handle unaligned_volatile_store
+            // FIXME actually do nontemporal stores if requested
             let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
             dest.write_cvalue(fx, val);
         }
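All three intrinsics currently lower to the same plain store, as the FIXMEs note. On the user side, the stable route to the volatile case is `std::ptr::write_volatile`; a minimal usage example:

fn main() {
    let mut flag = 0u32;
    unsafe {
        std::ptr::write_volatile(&mut flag, 1);
        assert_eq!(std::ptr::read_volatile(&flag), 1);
    }
}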
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 0de2dccda71..ebd153cb71d 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -268,8 +268,6 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Arc<dyn isa::Tar
     };
     flags_builder.set("tls_model", tls_model).unwrap();
 
-    flags_builder.set("enable_simd", "true").unwrap();
-
     flags_builder.set("enable_llvm_abi_extensions", "true").unwrap();
 
     use rustc_session::config::OptLevel;
diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
index ec0b61a7ce5..ff95141ce90 100644
--- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs
+++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs
@@ -2,6 +2,8 @@
 
 use crate::prelude::*;
 
+use rustc_middle::ty::FnSig;
+
 use cranelift_codegen::entity::EntityRef;
 use cranelift_codegen::ir::immediates::Offset32;
 
@@ -160,6 +162,7 @@ impl<'tcx> CValue<'tcx> {
     }
 
     /// Load a value with layout.abi of scalar
+    #[track_caller]
     pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
         let layout = self.1;
         match self.0 {
@@ -182,6 +185,7 @@ impl<'tcx> CValue<'tcx> {
     }
 
     /// Load a value pair with layout.abi of scalar pair
+    #[track_caller]
     pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
         let layout = self.1;
         match self.0 {
@@ -583,17 +587,25 @@ impl<'tcx> CPlace<'tcx> {
         let dst_layout = self.layout();
         match self.inner {
             CPlaceInner::Var(_local, var) => {
-                let data = CValue(from.0, dst_layout).load_scalar(fx);
+                let data = match from.1.abi {
+                    Abi::Scalar(_) => CValue(from.0, dst_layout).load_scalar(fx),
+                    _ => {
+                        let (ptr, meta) = from.force_stack(fx);
+                        assert!(meta.is_none());
+                        CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar(fx)
+                    }
+                };
                 let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                 transmute_scalar(fx, var, data, dst_ty);
             }
             CPlaceInner::VarPair(_local, var1, var2) => {
-                let (data1, data2) = if from.layout().ty == dst_layout.ty {
-                    CValue(from.0, dst_layout).load_scalar_pair(fx)
-                } else {
-                    let (ptr, meta) = from.force_stack(fx);
-                    assert!(meta.is_none());
-                    CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
+                let (data1, data2) = match from.1.abi {
+                    Abi::ScalarPair(_, _) => CValue(from.0, dst_layout).load_scalar_pair(fx),
+                    _ => {
+                        let (ptr, meta) = from.force_stack(fx);
+                        assert!(meta.is_none());
+                        CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
+                    }
                 };
                 let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
                 transmute_scalar(fx, var1, data1, dst_ty1);
@@ -607,30 +619,38 @@ impl<'tcx> CPlace<'tcx> {
 
                 let mut flags = MemFlags::new();
                 flags.set_notrap();
-                match from.layout().abi {
-                    Abi::Scalar(_) => {
-                        let val = from.load_scalar(fx);
-                        to_ptr.store(fx, val, flags);
-                        return;
-                    }
-                    Abi::ScalarPair(a_scalar, b_scalar) => {
-                        let (value, extra) = from.load_scalar_pair(fx);
-                        let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-                        to_ptr.store(fx, value, flags);
-                        to_ptr.offset(fx, b_offset).store(fx, extra, flags);
-                        return;
-                    }
-                    _ => {}
-                }
 
                 match from.0 {
                     CValueInner::ByVal(val) => {
                         to_ptr.store(fx, val, flags);
                     }
-                    CValueInner::ByValPair(_, _) => {
-                        bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
-                    }
+                    CValueInner::ByValPair(val1, val2) => match from.layout().abi {
+                        Abi::ScalarPair(a_scalar, b_scalar) => {
+                            let b_offset =
+                                scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                            to_ptr.store(fx, val1, flags);
+                            to_ptr.offset(fx, b_offset).store(fx, val2, flags);
+                        }
+                        _ => bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi),
+                    },
                     CValueInner::ByRef(from_ptr, None) => {
+                        match from.layout().abi {
+                            Abi::Scalar(_) => {
+                                let val = from.load_scalar(fx);
+                                to_ptr.store(fx, val, flags);
+                                return;
+                            }
+                            Abi::ScalarPair(a_scalar, b_scalar) => {
+                                let b_offset =
+                                    scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                                let (val1, val2) = from.load_scalar_pair(fx);
+                                to_ptr.store(fx, val1, flags);
+                                to_ptr.offset(fx, b_offset).store(fx, val2, flags);
+                                return;
+                            }
+                            _ => {}
+                        }
+
                         let from_addr = from_ptr.get_addr(fx);
                         let to_addr = to_ptr.get_addr(fx);
                         let src_layout = from.1;
@@ -815,11 +835,42 @@ pub(crate) fn assert_assignable<'tcx>(
                 ParamEnv::reveal_all(),
                 from_ty.fn_sig(fx.tcx),
             );
+            let FnSig {
+                inputs_and_output: types_from,
+                c_variadic: c_variadic_from,
+                unsafety: unsafety_from,
+                abi: abi_from,
+            } = from_sig;
             let to_sig = fx
                 .tcx
                 .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
+            let FnSig {
+                inputs_and_output: types_to,
+                c_variadic: c_variadic_to,
+                unsafety: unsafety_to,
+                abi: abi_to,
+            } = to_sig;
+            let mut types_from = types_from.iter();
+            let mut types_to = types_to.iter();
+            loop {
+                match (types_from.next(), types_to.next()) {
+                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
+                    (None, None) => break,
+                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+                }
+            }
+            assert_eq!(
+                c_variadic_from, c_variadic_to,
+                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+                from_sig, to_sig, fx,
+            );
+            assert_eq!(
+                unsafety_from, unsafety_to,
+                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
+                from_sig, to_sig, fx,
+            );
             assert_eq!(
-                from_sig, to_sig,
+                abi_from, abi_to,
                 "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
                 from_sig, to_sig, fx,
             );
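Replacing the single `assert_eq!(from_sig, to_sig)` with exhaustive destructuring means a field later added to `FnSig` becomes a compile error at this site, and each component gets its own assertion message (with the input types recursing through `assert_assignable`). A standalone sketch of the destructure-and-compare pattern, using a stand-in signature type rather than rustc's `FnSig`:

#[derive(Debug)]
struct Sig {
    inputs_and_output: Vec<&'static str>,
    c_variadic: bool,
    unsafety: bool,
    abi: &'static str,
}

fn assert_sig_compatible(from: &Sig, to: &Sig) {
    // Exhaustive destructuring: a new field on Sig breaks these lines,
    // forcing this comparison to be kept up to date.
    let Sig { inputs_and_output: types_from, c_variadic: cv_from, unsafety: un_from, abi: abi_from } = from;
    let Sig { inputs_and_output: types_to, c_variadic: cv_to, unsafety: un_to, abi: abi_to } = to;
    assert_eq!(types_from.len(), types_to.len(), "arity mismatch: {from:?} vs {to:?}");
    assert_eq!(cv_from, cv_to, "c_variadic mismatch: {from:?} vs {to:?}");
    assert_eq!(un_from, un_to, "unsafety mismatch: {from:?} vs {to:?}");
    assert_eq!(abi_from, abi_to, "abi mismatch: {from:?} vs {to:?}");
}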