author    Trevor Gross <t.gross35@gmail.com>  2025-08-08 14:22:44 -0500
committer GitHub <noreply@github.com>         2025-08-08 14:22:44 -0500
commit    6fa6a854cd791e9efa813e9b18cd1a5337ded971 (patch)
tree      5da733187115c0091cfda5db93500cd086151107
parent    f5dda197758f7ae94538ef6c2beec21c44a18c7b (diff)
parent    de1b999ff6c981475e4491ea2fff1851655587e5 (diff)
Rollup merge of #144192 - RalfJung:atomicrmw-ptr, r=nikic
atomicrmw on pointers: move integer-pointer cast hacks into backend

Conceptually, we want to have atomic operations on pointers of the form `fn atomic_add(ptr: *mut T, offset: usize, ...)`. However, LLVM does not directly support such operations (https://github.com/llvm/llvm-project/issues/120837), so we have to cast the `offset` to a pointer somewhere.

This PR moves that hack into the LLVM backend, so that the standard library, intrinsic, and Miri all work with the conceptual operation we actually want. Hopefully, one day LLVM will gain a way to represent these operations without integer-pointer casts, and then the hack will disappear entirely.
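To make the intended semantics concrete, here is a minimal non-atomic model of the conceptual operation (the helper name `model_atomic_ptr_add` is made up for illustration; the real intrinsic performs the read-modify-write atomically):

```rust
// Non-atomic sketch of `fn atomic_add(ptr: *mut T, offset: usize, ...)`: the offset is a
// plain integer, and both the return value and the newly stored pointer keep the provenance
// of the old pointer. No integer-pointer cast is involved on the caller side.
unsafe fn model_atomic_ptr_add<T>(dst: *mut *mut T, offset: usize) -> *mut T {
    let old = unsafe { *dst };
    // `wrapping_byte_add` preserves the provenance of `old`.
    unsafe { *dst = old.wrapping_byte_add(offset) };
    old
}

fn main() {
    let mut buf = [0u8; 8];
    let mut slot: *mut u8 = buf.as_mut_ptr();
    let old = unsafe { model_atomic_ptr_add(&mut slot, 3) };
    assert_eq!(slot.addr(), old.addr() + 3);
}
```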

Cc `@nikic` -- this is the best we can do right now, right?
Fixes https://github.com/rust-lang/rust/issues/134617
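
For reference, the public API is unchanged. The snippet below is adapted from the updated `fetch_byte_sub` doc example in this diff, switched to `fetch_byte_add`; it needs a nightly toolchain, since `fetch_byte_add` is still gated behind `strict_provenance_atomic_ptr`:

```rust
#![feature(strict_provenance_atomic_ptr)]
use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut arr = [0i64, 1];
    let atom = AtomicPtr::<i64>::new(&raw mut arr[0]);
    // The `usize` offset is now passed straight through to the intrinsic; the returned
    // pointer keeps the provenance of the pointer that was stored in `atom`.
    let old = atom.fetch_byte_add(std::mem::size_of::<i64>(), Ordering::Relaxed);
    assert_eq!(old.addr(), (&raw const arr[0]).addr());
    assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[1]).addr());
}
```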
Diffstat:
-rw-r--r--  compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs | 24
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs | 7
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs | 14
-rw-r--r--  compiler/rustc_codegen_ssa/messages.ftl | 2
-rw-r--r--  compiler/rustc_codegen_ssa/src/errors.rs | 8
-rw-r--r--  compiler/rustc_codegen_ssa/src/mir/intrinsic.rs | 82
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/builder.rs | 3
-rw-r--r--  compiler/rustc_hir_analysis/src/check/intrinsic.rs | 12
-rw-r--r--  compiler/rustc_middle/src/ty/generic_args.rs | 17
-rw-r--r--  library/core/src/intrinsics/mod.rs | 30
-rw-r--r--  library/core/src/sync/atomic.rs | 89
-rw-r--r--  src/tools/miri/src/intrinsics/atomic.rs | 19
-rw-r--r--  src/tools/miri/src/operator.rs | 8
-rw-r--r--  tests/codegen-llvm/atomicptr.rs | 6
-rw-r--r--  tests/run-make/atomic-lock-free/atomic_lock_free.rs | 39
-rw-r--r--  tests/ui/intrinsics/intrinsic-atomics.rs | 12
-rw-r--r--  tests/ui/intrinsics/non-integer-atomic.rs | 32
-rw-r--r--  tests/ui/intrinsics/non-integer-atomic.stderr | 32
18 files changed, 255 insertions, 181 deletions
diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
index 4ff5773a06c..ed40901ac9b 100644
--- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
+++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
@@ -969,7 +969,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let layout = amount.layout();
             match layout.ty.kind() {
-                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                ty::Uint(_) | ty::Int(_) => {}
                 _ => {
                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                     return Ok(());
@@ -982,7 +982,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old =
                 fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
 
-            let old = CValue::by_val(old, layout);
+            let old = CValue::by_val(old, ret.layout());
             ret.write_cvalue(fx, old);
         }
         sym::atomic_xsub => {
@@ -991,7 +991,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let layout = amount.layout();
             match layout.ty.kind() {
-                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                ty::Uint(_) | ty::Int(_) => {}
                 _ => {
                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                     return Ok(());
@@ -1004,7 +1004,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let old =
                 fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
 
-            let old = CValue::by_val(old, layout);
+            let old = CValue::by_val(old, ret.layout());
             ret.write_cvalue(fx, old);
         }
         sym::atomic_and => {
@@ -1013,7 +1013,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let layout = src.layout();
             match layout.ty.kind() {
-                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                ty::Uint(_) | ty::Int(_) => {}
                 _ => {
                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                     return Ok(());
@@ -1025,7 +1025,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
 
-            let old = CValue::by_val(old, layout);
+            let old = CValue::by_val(old, ret.layout());
             ret.write_cvalue(fx, old);
         }
         sym::atomic_or => {
@@ -1034,7 +1034,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let layout = src.layout();
             match layout.ty.kind() {
-                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                ty::Uint(_) | ty::Int(_) => {}
                 _ => {
                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                     return Ok(());
@@ -1046,7 +1046,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
 
-            let old = CValue::by_val(old, layout);
+            let old = CValue::by_val(old, ret.layout());
             ret.write_cvalue(fx, old);
         }
         sym::atomic_xor => {
@@ -1055,7 +1055,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let layout = src.layout();
             match layout.ty.kind() {
-                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                ty::Uint(_) | ty::Int(_) => {}
                 _ => {
                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                     return Ok(());
@@ -1067,7 +1067,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
 
-            let old = CValue::by_val(old, layout);
+            let old = CValue::by_val(old, ret.layout());
             ret.write_cvalue(fx, old);
         }
         sym::atomic_nand => {
@@ -1076,7 +1076,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let layout = src.layout();
             match layout.ty.kind() {
-                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
+                ty::Uint(_) | ty::Int(_) => {}
                 _ => {
                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                     return Ok(());
@@ -1088,7 +1088,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
 
             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
 
-            let old = CValue::by_val(old, layout);
+            let old = CValue::by_val(old, ret.layout());
             ret.write_cvalue(fx, old);
         }
         sym::atomic_max => {
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 34ade3d025f..f7a7a3f8c7e 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -1671,6 +1671,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         dst: RValue<'gcc>,
         src: RValue<'gcc>,
         order: AtomicOrdering,
+        ret_ptr: bool,
     ) -> RValue<'gcc> {
         let size = get_maybe_pointer_size(src);
         let name = match op {
@@ -1698,6 +1699,9 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let atomic_function = self.context.get_builtin_function(name);
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 
+        // FIXME: If `ret_ptr` is true and `src` is an integer, we should really tell GCC
+        // that this is a pointer operation that needs to preserve provenance -- but like LLVM,
+        // GCC does not currently seem to support that.
         let void_ptr_type = self.context.new_type::<*mut ()>();
         let volatile_void_ptr_type = void_ptr_type.make_volatile();
         let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
@@ -1705,7 +1709,8 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
         let src = self.context.new_bitcast(self.location, src, new_src_type);
         let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]);
-        self.context.new_cast(self.location, res, src.get_type())
+        let res_type = if ret_ptr { void_ptr_type } else { src.get_type() };
+        self.context.new_cast(self.location, res, res_type)
     }
 
     fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index bd175e560c7..32cdef075e7 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1327,15 +1327,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         &mut self,
         op: rustc_codegen_ssa::common::AtomicRmwBinOp,
         dst: &'ll Value,
-        mut src: &'ll Value,
+        src: &'ll Value,
         order: rustc_middle::ty::AtomicOrdering,
+        ret_ptr: bool,
     ) -> &'ll Value {
-        // The only RMW operation that LLVM supports on pointers is compare-exchange.
-        let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
-            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
-        if requires_cast_to_int {
-            src = self.ptrtoint(src, self.type_isize());
-        }
+        // FIXME: If `ret_ptr` is true and `src` is not a pointer, we *should* tell LLVM that the
+        // LHS is a pointer and the operation should be provenance-preserving, but LLVM does not
+        // currently support that (https://github.com/llvm/llvm-project/issues/120837).
         let mut res = unsafe {
             llvm::LLVMBuildAtomicRMW(
                 self.llbuilder,
@@ -1346,7 +1344,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 llvm::False, // SingleThreaded
             )
         };
-        if requires_cast_to_int {
+        if ret_ptr && self.val_ty(res) != self.type_ptr() {
             res = self.inttoptr(res, self.type_ptr());
         }
         res
diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl
index a70d0011d16..36ad5ede7c2 100644
--- a/compiler/rustc_codegen_ssa/messages.ftl
+++ b/compiler/rustc_codegen_ssa/messages.ftl
@@ -101,6 +101,8 @@ codegen_ssa_invalid_monomorphization_basic_float_type = invalid monomorphization
 
 codegen_ssa_invalid_monomorphization_basic_integer_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}`
 
+codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer or pointer type, found `{$ty}`
+
 codegen_ssa_invalid_monomorphization_cannot_return = invalid monomorphization of `{$name}` intrinsic: cannot return `{$ret_ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]`
 
 codegen_ssa_invalid_monomorphization_cast_wide_pointer = invalid monomorphization of `{$name}` intrinsic: cannot cast wide pointer `{$ty}`
diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs
index 3d787d8bdbd..af4adcd1954 100644
--- a/compiler/rustc_codegen_ssa/src/errors.rs
+++ b/compiler/rustc_codegen_ssa/src/errors.rs
@@ -764,6 +764,14 @@ pub enum InvalidMonomorphization<'tcx> {
         ty: Ty<'tcx>,
     },
 
+    #[diag(codegen_ssa_invalid_monomorphization_basic_integer_or_ptr_type, code = E0511)]
+    BasicIntegerOrPtrType {
+        #[primary_span]
+        span: Span,
+        name: Symbol,
+        ty: Ty<'tcx>,
+    },
+
     #[diag(codegen_ssa_invalid_monomorphization_basic_float_type, code = E0511)]
     BasicFloatType {
         #[primary_span]
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index fc95f62b4a4..3c667b8e882 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -92,6 +92,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let invalid_monomorphization_int_type = |ty| {
             bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
         };
+        let invalid_monomorphization_int_or_ptr_type = |ty| {
+            bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerOrPtrType {
+                span,
+                name,
+                ty,
+            });
+        };
 
         let parse_atomic_ordering = |ord: ty::Value<'tcx>| {
             let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
@@ -351,7 +358,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::atomic_load => {
                 let ty = fn_args.type_at(0);
                 if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
-                    invalid_monomorphization_int_type(ty);
+                    invalid_monomorphization_int_or_ptr_type(ty);
                     return Ok(());
                 }
                 let ordering = fn_args.const_at(1).to_value();
@@ -367,7 +374,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::atomic_store => {
                 let ty = fn_args.type_at(0);
                 if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
-                    invalid_monomorphization_int_type(ty);
+                    invalid_monomorphization_int_or_ptr_type(ty);
                     return Ok(());
                 }
                 let ordering = fn_args.const_at(1).to_value();
@@ -377,10 +384,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 bx.atomic_store(val, ptr, parse_atomic_ordering(ordering), size);
                 return Ok(());
             }
+            // These are all AtomicRMW ops
             sym::atomic_cxchg | sym::atomic_cxchgweak => {
                 let ty = fn_args.type_at(0);
                 if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
-                    invalid_monomorphization_int_type(ty);
+                    invalid_monomorphization_int_or_ptr_type(ty);
                     return Ok(());
                 }
                 let succ_ordering = fn_args.const_at(1).to_value();
@@ -407,7 +415,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
 
                 return Ok(());
             }
-            // These are all AtomicRMW ops
             sym::atomic_max | sym::atomic_min => {
                 let atom_op = if name == sym::atomic_max {
                     AtomicRmwBinOp::AtomicMax
@@ -420,7 +427,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let ordering = fn_args.const_at(1).to_value();
                     let ptr = args[0].immediate();
                     let val = args[1].immediate();
-                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                    bx.atomic_rmw(
+                        atom_op,
+                        ptr,
+                        val,
+                        parse_atomic_ordering(ordering),
+                        /* ret_ptr */ false,
+                    )
                 } else {
                     invalid_monomorphization_int_type(ty);
                     return Ok(());
@@ -438,21 +451,44 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let ordering = fn_args.const_at(1).to_value();
                     let ptr = args[0].immediate();
                     let val = args[1].immediate();
-                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                    bx.atomic_rmw(
+                        atom_op,
+                        ptr,
+                        val,
+                        parse_atomic_ordering(ordering),
+                        /* ret_ptr */ false,
+                    )
                 } else {
                     invalid_monomorphization_int_type(ty);
                     return Ok(());
                 }
             }
-            sym::atomic_xchg
-            | sym::atomic_xadd
+            sym::atomic_xchg => {
+                let ty = fn_args.type_at(0);
+                let ordering = fn_args.const_at(1).to_value();
+                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
+                    let ptr = args[0].immediate();
+                    let val = args[1].immediate();
+                    let atomic_op = AtomicRmwBinOp::AtomicXchg;
+                    bx.atomic_rmw(
+                        atomic_op,
+                        ptr,
+                        val,
+                        parse_atomic_ordering(ordering),
+                        /* ret_ptr */ ty.is_raw_ptr(),
+                    )
+                } else {
+                    invalid_monomorphization_int_or_ptr_type(ty);
+                    return Ok(());
+                }
+            }
+            sym::atomic_xadd
             | sym::atomic_xsub
             | sym::atomic_and
             | sym::atomic_nand
             | sym::atomic_or
             | sym::atomic_xor => {
                 let atom_op = match name {
-                    sym::atomic_xchg => AtomicRmwBinOp::AtomicXchg,
                     sym::atomic_xadd => AtomicRmwBinOp::AtomicAdd,
                     sym::atomic_xsub => AtomicRmwBinOp::AtomicSub,
                     sym::atomic_and => AtomicRmwBinOp::AtomicAnd,
@@ -462,14 +498,28 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => unreachable!(),
                 };
 
-                let ty = fn_args.type_at(0);
-                if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                    let ordering = fn_args.const_at(1).to_value();
-                    let ptr = args[0].immediate();
-                    let val = args[1].immediate();
-                    bx.atomic_rmw(atom_op, ptr, val, parse_atomic_ordering(ordering))
+                // The type of the in-memory data.
+                let ty_mem = fn_args.type_at(0);
+                // The type of the 2nd operand, given by-value.
+                let ty_op = fn_args.type_at(1);
+
+                let ordering = fn_args.const_at(2).to_value();
+                // We require either both arguments to have the same integer type, or the first to
+                // be a pointer and the second to be `usize`.
+                if (int_type_width_signed(ty_mem, bx.tcx()).is_some() && ty_op == ty_mem)
+                    || (ty_mem.is_raw_ptr() && ty_op == bx.tcx().types.usize)
+                {
+                    let ptr = args[0].immediate(); // of type "pointer to `ty_mem`"
+                    let val = args[1].immediate(); // of type `ty_op`
+                    bx.atomic_rmw(
+                        atom_op,
+                        ptr,
+                        val,
+                        parse_atomic_ordering(ordering),
+                        /* ret_ptr */ ty_mem.is_raw_ptr(),
+                    )
                 } else {
-                    invalid_monomorphization_int_type(ty);
+                    invalid_monomorphization_int_or_ptr_type(ty_mem);
                     return Ok(());
                 }
             }
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index 4b18146863b..f417d1a7bf7 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -548,12 +548,15 @@ pub trait BuilderMethods<'a, 'tcx>:
         failure_order: AtomicOrdering,
         weak: bool,
     ) -> (Self::Value, Self::Value);
+    /// `ret_ptr` indicates whether the return type (which is also the type `dst` points to)
+    /// is a pointer or the same type as `src`.
     fn atomic_rmw(
         &mut self,
         op: AtomicRmwBinOp,
         dst: Self::Value,
         src: Self::Value,
         order: AtomicOrdering,
+        ret_ptr: bool,
     ) -> Self::Value;
     fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope);
     fn set_invariant_load(&mut self, load: Self::Value);
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 6e5fe3823ab..4441dd6ebd6 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -652,16 +652,16 @@ pub(crate) fn check_intrinsic_type(
         sym::atomic_store => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
 
         sym::atomic_xchg
-        | sym::atomic_xadd
-        | sym::atomic_xsub
-        | sym::atomic_and
-        | sym::atomic_nand
-        | sym::atomic_or
-        | sym::atomic_xor
         | sym::atomic_max
         | sym::atomic_min
         | sym::atomic_umax
         | sym::atomic_umin => (1, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
+        sym::atomic_xadd
+        | sym::atomic_xsub
+        | sym::atomic_and
+        | sym::atomic_nand
+        | sym::atomic_or
+        | sym::atomic_xor => (2, 1, vec![Ty::new_mut_ptr(tcx, param(0)), param(1)], param(0)),
         sym::atomic_fence | sym::atomic_singlethreadfence => (0, 1, Vec::new(), tcx.types.unit),
 
         other => {
diff --git a/compiler/rustc_middle/src/ty/generic_args.rs b/compiler/rustc_middle/src/ty/generic_args.rs
index b02abb5ab43..648709e88e6 100644
--- a/compiler/rustc_middle/src/ty/generic_args.rs
+++ b/compiler/rustc_middle/src/ty/generic_args.rs
@@ -527,21 +527,28 @@ impl<'tcx> GenericArgs<'tcx> {
     #[inline]
     #[track_caller]
     pub fn type_at(&self, i: usize) -> Ty<'tcx> {
-        self[i].as_type().unwrap_or_else(|| bug!("expected type for param #{} in {:?}", i, self))
+        self[i].as_type().unwrap_or_else(
+            #[track_caller]
+            || bug!("expected type for param #{} in {:?}", i, self),
+        )
     }
 
     #[inline]
     #[track_caller]
     pub fn region_at(&self, i: usize) -> ty::Region<'tcx> {
-        self[i]
-            .as_region()
-            .unwrap_or_else(|| bug!("expected region for param #{} in {:?}", i, self))
+        self[i].as_region().unwrap_or_else(
+            #[track_caller]
+            || bug!("expected region for param #{} in {:?}", i, self),
+        )
     }
 
     #[inline]
     #[track_caller]
     pub fn const_at(&self, i: usize) -> ty::Const<'tcx> {
-        self[i].as_const().unwrap_or_else(|| bug!("expected const for param #{} in {:?}", i, self))
+        self[i].as_const().unwrap_or_else(
+            #[track_caller]
+            || bug!("expected const for param #{} in {:?}", i, self),
+        )
     }
 
     #[inline]
diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs
index 05b0522c202..7228ad0ed6d 100644
--- a/library/core/src/intrinsics/mod.rs
+++ b/library/core/src/intrinsics/mod.rs
@@ -150,69 +150,63 @@ pub unsafe fn atomic_xchg<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src:
 
 /// Adds to the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_add` method. For example, [`AtomicIsize::fetch_add`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_xadd<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_xadd<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Subtract from the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_sub` method. For example, [`AtomicIsize::fetch_sub`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_xsub<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_xsub<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise and with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_and` method. For example, [`AtomicBool::fetch_and`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_and<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_and<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise nand with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`AtomicBool`] type via the `fetch_nand` method. For example, [`AtomicBool::fetch_nand`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_nand<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_nand<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise or with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_or` method. For example, [`AtomicBool::fetch_or`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_or<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_or<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Bitwise xor with the current value, returning the previous value.
 /// `T` must be an integer or pointer type.
-/// If `T` is a pointer type, the provenance of `src` is ignored: both the return value and the new
-/// value stored at `*dst` will have the provenance of the old value stored there.
+/// `U` must be the same as `T` if that is an integer type, or `usize` if `T` is a pointer type.
 ///
 /// The stabilized version of this intrinsic is available on the
 /// [`atomic`] types via the `fetch_xor` method. For example, [`AtomicBool::fetch_xor`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
-pub unsafe fn atomic_xor<T: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+pub unsafe fn atomic_xor<T: Copy, U: Copy, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 /// Maximum with the current value using a signed comparison.
 /// `T` must be a signed integer type.
diff --git a/library/core/src/sync/atomic.rs b/library/core/src/sync/atomic.rs
index 70c02ead358..44a6895f90a 100644
--- a/library/core/src/sync/atomic.rs
+++ b/library/core/src/sync/atomic.rs
@@ -2293,7 +2293,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_add(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_add(self.p.get(), val, order).cast() }
     }
 
     /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
@@ -2318,9 +2318,10 @@ impl<T> AtomicPtr<T> {
     /// #![feature(strict_provenance_atomic_ptr)]
     /// use core::sync::atomic::{AtomicPtr, Ordering};
     ///
-    /// let atom = AtomicPtr::<i64>::new(core::ptr::without_provenance_mut(1));
-    /// assert_eq!(atom.fetch_byte_sub(1, Ordering::Relaxed).addr(), 1);
-    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 0);
+    /// let mut arr = [0i64, 1];
+    /// let atom = AtomicPtr::<i64>::new(&raw mut arr[1]);
+    /// assert_eq!(atom.fetch_byte_sub(8, Ordering::Relaxed).addr(), (&raw const arr[1]).addr());
+    /// assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
     /// ```
     #[inline]
     #[cfg(target_has_atomic = "ptr")]
@@ -2328,7 +2329,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_sub(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_sub(self.p.get(), val, order).cast() }
     }
 
     /// Performs a bitwise "or" operation on the address of the current pointer,
@@ -2379,7 +2380,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_or(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_or(self.p.get(), val, order).cast() }
     }
 
     /// Performs a bitwise "and" operation on the address of the current
@@ -2429,7 +2430,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_and(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_and(self.p.get(), val, order).cast() }
     }
 
     /// Performs a bitwise "xor" operation on the address of the current
@@ -2477,7 +2478,7 @@ impl<T> AtomicPtr<T> {
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
         // SAFETY: data races are prevented by atomic intrinsics.
-        unsafe { atomic_xor(self.p.get(), core::ptr::without_provenance_mut(val), order).cast() }
+        unsafe { atomic_xor(self.p.get(), val, order).cast() }
     }
 
     /// Returns a mutable pointer to the underlying pointer.
@@ -3981,15 +3982,15 @@ unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_add`.
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_xadd::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_xadd::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_xadd::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_xadd::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_xadd::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -3998,15 +3999,15 @@ unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_xsub::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_xsub::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_xsub::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_xsub::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_xsub::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -4147,15 +4148,15 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_and`
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_and::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_and::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_and::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_and::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_and::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -4163,15 +4164,15 @@ unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_nand`
     unsafe {
         match order {
-            Relaxed => intrinsics::atomic_nand::<T, { AO::Relaxed }>(dst, val),
-            Acquire => intrinsics::atomic_nand::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_nand::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_nand::<T, { AO::AcqRel }>(dst, val),
-            SeqCst => intrinsics::atomic_nand::<T, { AO::SeqCst }>(dst, val),
+            Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
+            Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
+            SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
         }
     }
 }
@@ -4179,15 +4180,15 @@ unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_or`
     unsafe {
         match order {
-            SeqCst => intrinsics::atomic_or::<T, { AO::SeqCst }>(dst, val),
-            Acquire => intrinsics::atomic_or::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_or::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_or::<T, { AO::AcqRel }>(dst, val),
-            Relaxed => intrinsics::atomic_or::<T, { AO::Relaxed }>(dst, val),
+            SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
+            Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
+            Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
         }
     }
 }
@@ -4195,15 +4196,15 @@ unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
-unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
+unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_xor`
     unsafe {
         match order {
-            SeqCst => intrinsics::atomic_xor::<T, { AO::SeqCst }>(dst, val),
-            Acquire => intrinsics::atomic_xor::<T, { AO::Acquire }>(dst, val),
-            Release => intrinsics::atomic_xor::<T, { AO::Release }>(dst, val),
-            AcqRel => intrinsics::atomic_xor::<T, { AO::AcqRel }>(dst, val),
-            Relaxed => intrinsics::atomic_xor::<T, { AO::Relaxed }>(dst, val),
+            SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
+            Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
+            Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
+            AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
+            Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
         }
     }
 }
diff --git a/src/tools/miri/src/intrinsics/atomic.rs b/src/tools/miri/src/intrinsics/atomic.rs
index bcc3e9ec885..e6341252927 100644
--- a/src/tools/miri/src/intrinsics/atomic.rs
+++ b/src/tools/miri/src/intrinsics/atomic.rs
@@ -105,27 +105,27 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             }
 
             "or" => {
-                let ord = get_ord_at(1);
+                let ord = get_ord_at(2);
                 this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitOr, false), rw_ord(ord))?;
             }
             "xor" => {
-                let ord = get_ord_at(1);
+                let ord = get_ord_at(2);
                 this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitXor, false), rw_ord(ord))?;
             }
             "and" => {
-                let ord = get_ord_at(1);
+                let ord = get_ord_at(2);
                 this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, false), rw_ord(ord))?;
             }
             "nand" => {
-                let ord = get_ord_at(1);
+                let ord = get_ord_at(2);
                 this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::BitAnd, true), rw_ord(ord))?;
             }
             "xadd" => {
-                let ord = get_ord_at(1);
+                let ord = get_ord_at(2);
                 this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::Add, false), rw_ord(ord))?;
             }
             "xsub" => {
-                let ord = get_ord_at(1);
+                let ord = get_ord_at(2);
                 this.atomic_rmw_op(args, dest, AtomicOp::MirOp(BinOp::Sub, false), rw_ord(ord))?;
             }
             "min" => {
@@ -231,15 +231,14 @@ trait EvalContextPrivExt<'tcx>: MiriInterpCxExt<'tcx> {
         let place = this.deref_pointer(place)?;
         let rhs = this.read_immediate(rhs)?;
 
-        if !place.layout.ty.is_integral() && !place.layout.ty.is_raw_ptr() {
+        if !(place.layout.ty.is_integral() || place.layout.ty.is_raw_ptr())
+            || !(rhs.layout.ty.is_integral() || rhs.layout.ty.is_raw_ptr())
+        {
             span_bug!(
                 this.cur_span(),
                 "atomic arithmetic operations only work on integer and raw pointer types",
             );
         }
-        if rhs.layout.ty != place.layout.ty {
-            span_bug!(this.cur_span(), "atomic arithmetic operation type mismatch");
-        }
 
         let old = match atomic_op {
             AtomicOp::Min =>
diff --git a/src/tools/miri/src/operator.rs b/src/tools/miri/src/operator.rs
index 73d671121f6..3c3f2c28535 100644
--- a/src/tools/miri/src/operator.rs
+++ b/src/tools/miri/src/operator.rs
@@ -50,17 +50,13 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> {
             }
 
             // Some more operations are possible with atomics.
-            // The return value always has the provenance of the *left* operand.
+            // The RHS must be `usize`.
             Add | Sub | BitOr | BitAnd | BitXor => {
                 assert!(left.layout.ty.is_raw_ptr());
-                assert!(right.layout.ty.is_raw_ptr());
+                assert_eq!(right.layout.ty, this.tcx.types.usize);
                 let ptr = left.to_scalar().to_pointer(this)?;
                 // We do the actual operation with usize-typed scalars.
                 let left = ImmTy::from_uint(ptr.addr().bytes(), this.machine.layouts.usize);
-                let right = ImmTy::from_uint(
-                    right.to_scalar().to_target_usize(this)?,
-                    this.machine.layouts.usize,
-                );
                 let result = this.binary_op(bin_op, &left, &right)?;
                 // Construct a new pointer with the provenance of `ptr` (the LHS).
                 let result_ptr = Pointer::new(
diff --git a/tests/codegen-llvm/atomicptr.rs b/tests/codegen-llvm/atomicptr.rs
index 4819af40ca2..ce6c4aa0d2b 100644
--- a/tests/codegen-llvm/atomicptr.rs
+++ b/tests/codegen-llvm/atomicptr.rs
@@ -1,5 +1,5 @@
 // LLVM does not support some atomic RMW operations on pointers, so inside codegen we lower those
-// to integer atomics, surrounded by casts to and from integer type.
+// to integer atomics, followed by an inttoptr cast.
 // This test ensures that we do the round-trip correctly for AtomicPtr::fetch_byte_add, and also
 // ensures that we do not have such a round-trip for AtomicPtr::swap, because LLVM supports pointer
 // arguments to `atomicrmw xchg`.
@@ -20,8 +20,8 @@ pub fn helper(_: usize) {}
 // CHECK-LABEL: @atomicptr_fetch_byte_add
 #[no_mangle]
 pub fn atomicptr_fetch_byte_add(a: &AtomicPtr<u8>, v: usize) -> *mut u8 {
-    // CHECK: %[[INTPTR:.*]] = ptrtoint ptr %{{.*}} to [[USIZE]]
-    // CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %[[INTPTR]]
+    // CHECK: llvm.lifetime.start
+    // CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %v
     // CHECK-NEXT: inttoptr [[USIZE]] %[[RET]] to ptr
     a.fetch_byte_add(v, Relaxed)
 }
diff --git a/tests/run-make/atomic-lock-free/atomic_lock_free.rs b/tests/run-make/atomic-lock-free/atomic_lock_free.rs
index f5c3b360ee8..92ffd111ce8 100644
--- a/tests/run-make/atomic-lock-free/atomic_lock_free.rs
+++ b/tests/run-make/atomic-lock-free/atomic_lock_free.rs
@@ -14,7 +14,7 @@ pub enum AtomicOrdering {
 }
 
 #[rustc_intrinsic]
-unsafe fn atomic_xadd<T, const ORD: AtomicOrdering>(dst: *mut T, src: T) -> T;
+unsafe fn atomic_xadd<T, U, const ORD: AtomicOrdering>(dst: *mut T, src: U) -> T;
 
 #[lang = "pointee_sized"]
 pub trait PointeeSized {}
@@ -35,51 +35,62 @@ impl<T: ?Sized> Copy for *mut T {}
 impl ConstParamTy for AtomicOrdering {}
 
 #[cfg(target_has_atomic = "8")]
+#[unsafe(no_mangle)] // let's make sure we actually generate a symbol to check
 pub unsafe fn atomic_u8(x: *mut u8) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u8);
 }
 #[cfg(target_has_atomic = "8")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_i8(x: *mut i8) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i8);
 }
 #[cfg(target_has_atomic = "16")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_u16(x: *mut u16) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u16);
 }
 #[cfg(target_has_atomic = "16")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_i16(x: *mut i16) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i16);
 }
 #[cfg(target_has_atomic = "32")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_u32(x: *mut u32) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u32);
 }
 #[cfg(target_has_atomic = "32")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_i32(x: *mut i32) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i32);
 }
 #[cfg(target_has_atomic = "64")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_u64(x: *mut u64) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u64);
 }
 #[cfg(target_has_atomic = "64")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_i64(x: *mut i64) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i64);
 }
 #[cfg(target_has_atomic = "128")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_u128(x: *mut u128) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1u128);
 }
 #[cfg(target_has_atomic = "128")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_i128(x: *mut i128) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1i128);
 }
 #[cfg(target_has_atomic = "ptr")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_usize(x: *mut usize) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1usize);
 }
 #[cfg(target_has_atomic = "ptr")]
+#[unsafe(no_mangle)]
 pub unsafe fn atomic_isize(x: *mut isize) {
-    atomic_xadd::<_, { AtomicOrdering::SeqCst }>(x, 1);
+    atomic_xadd::<_, _, { AtomicOrdering::SeqCst }>(x, 1isize);
 }
diff --git a/tests/ui/intrinsics/intrinsic-atomics.rs b/tests/ui/intrinsics/intrinsic-atomics.rs
index 2275aafff83..c19948137db 100644
--- a/tests/ui/intrinsics/intrinsic-atomics.rs
+++ b/tests/ui/intrinsics/intrinsic-atomics.rs
@@ -33,14 +33,14 @@ pub fn main() {
         assert_eq!(rusti::atomic_xchg::<_, { Release }>(&mut *x, 0), 1);
         assert_eq!(*x, 0);
 
-        assert_eq!(rusti::atomic_xadd::<_, { SeqCst }>(&mut *x, 1), 0);
-        assert_eq!(rusti::atomic_xadd::<_, { Acquire }>(&mut *x, 1), 1);
-        assert_eq!(rusti::atomic_xadd::<_, { Release }>(&mut *x, 1), 2);
+        assert_eq!(rusti::atomic_xadd::<_, _, { SeqCst }>(&mut *x, 1), 0);
+        assert_eq!(rusti::atomic_xadd::<_, _, { Acquire }>(&mut *x, 1), 1);
+        assert_eq!(rusti::atomic_xadd::<_, _, { Release }>(&mut *x, 1), 2);
         assert_eq!(*x, 3);
 
-        assert_eq!(rusti::atomic_xsub::<_, { SeqCst }>(&mut *x, 1), 3);
-        assert_eq!(rusti::atomic_xsub::<_, { Acquire }>(&mut *x, 1), 2);
-        assert_eq!(rusti::atomic_xsub::<_, { Release }>(&mut *x, 1), 1);
+        assert_eq!(rusti::atomic_xsub::<_, _, { SeqCst }>(&mut *x, 1), 3);
+        assert_eq!(rusti::atomic_xsub::<_, _, { Acquire }>(&mut *x, 1), 2);
+        assert_eq!(rusti::atomic_xsub::<_, _, { Release }>(&mut *x, 1), 1);
         assert_eq!(*x, 0);
 
         loop {
diff --git a/tests/ui/intrinsics/non-integer-atomic.rs b/tests/ui/intrinsics/non-integer-atomic.rs
index 853c163710f..30f713f1241 100644
--- a/tests/ui/intrinsics/non-integer-atomic.rs
+++ b/tests/ui/intrinsics/non-integer-atomic.rs
@@ -13,80 +13,80 @@ pub type Quux = [u8; 100];
 
 pub unsafe fn test_bool_load(p: &mut bool, v: bool) {
     intrinsics::atomic_load::<_, { SeqCst }>(p);
-    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `bool`
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer or pointer type, found `bool`
 }
 
 pub unsafe fn test_bool_store(p: &mut bool, v: bool) {
     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
-    //~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `bool`
+    //~^ ERROR `atomic_store` intrinsic: expected basic integer or pointer type, found `bool`
 }
 
 pub unsafe fn test_bool_xchg(p: &mut bool, v: bool) {
     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
-    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `bool`
+    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer or pointer type, found `bool`
 }
 
 pub unsafe fn test_bool_cxchg(p: &mut bool, v: bool) {
     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
-    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `bool`
+    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `bool`
 }
 
 pub unsafe fn test_Foo_load(p: &mut Foo, v: Foo) {
     intrinsics::atomic_load::<_, { SeqCst }>(p);
-    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `Foo`
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer or pointer type, found `Foo`
 }
 
 pub unsafe fn test_Foo_store(p: &mut Foo, v: Foo) {
     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
-    //~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `Foo`
+    //~^ ERROR `atomic_store` intrinsic: expected basic integer or pointer type, found `Foo`
 }
 
 pub unsafe fn test_Foo_xchg(p: &mut Foo, v: Foo) {
     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
-    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `Foo`
+    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer or pointer type, found `Foo`
 }
 
 pub unsafe fn test_Foo_cxchg(p: &mut Foo, v: Foo) {
     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
-    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `Foo`
+    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `Foo`
 }
 
 pub unsafe fn test_Bar_load(p: &mut Bar, v: Bar) {
     intrinsics::atomic_load::<_, { SeqCst }>(p);
-    //~^ ERROR expected basic integer type, found `&dyn Fn()`
+    //~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
 }
 
 pub unsafe fn test_Bar_store(p: &mut Bar, v: Bar) {
     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
-    //~^ ERROR expected basic integer type, found `&dyn Fn()`
+    //~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
 }
 
 pub unsafe fn test_Bar_xchg(p: &mut Bar, v: Bar) {
     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
-    //~^ ERROR expected basic integer type, found `&dyn Fn()`
+    //~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
 }
 
 pub unsafe fn test_Bar_cxchg(p: &mut Bar, v: Bar) {
     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
-    //~^ ERROR expected basic integer type, found `&dyn Fn()`
+    //~^ ERROR expected basic integer or pointer type, found `&dyn Fn()`
 }
 
 pub unsafe fn test_Quux_load(p: &mut Quux, v: Quux) {
     intrinsics::atomic_load::<_, { SeqCst }>(p);
-    //~^ ERROR `atomic_load` intrinsic: expected basic integer type, found `[u8; 100]`
+    //~^ ERROR `atomic_load` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
 }
 
 pub unsafe fn test_Quux_store(p: &mut Quux, v: Quux) {
     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
-    //~^ ERROR `atomic_store` intrinsic: expected basic integer type, found `[u8; 100]`
+    //~^ ERROR `atomic_store` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
 }
 
 pub unsafe fn test_Quux_xchg(p: &mut Quux, v: Quux) {
     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
-    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer type, found `[u8; 100]`
+    //~^ ERROR `atomic_xchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
 }
 
 pub unsafe fn test_Quux_cxchg(p: &mut Quux, v: Quux) {
     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
-    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer type, found `[u8; 100]`
+    //~^ ERROR `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
 }
diff --git a/tests/ui/intrinsics/non-integer-atomic.stderr b/tests/ui/intrinsics/non-integer-atomic.stderr
index e539d99b8ae..b96ee7ba846 100644
--- a/tests/ui/intrinsics/non-integer-atomic.stderr
+++ b/tests/ui/intrinsics/non-integer-atomic.stderr
@@ -1,94 +1,94 @@
-error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `bool`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `bool`
   --> $DIR/non-integer-atomic.rs:15:5
    |
 LL |     intrinsics::atomic_load::<_, { SeqCst }>(p);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `bool`
+error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `bool`
   --> $DIR/non-integer-atomic.rs:20:5
    |
 LL |     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `bool`
+error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `bool`
   --> $DIR/non-integer-atomic.rs:25:5
    |
 LL |     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `bool`
+error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `bool`
   --> $DIR/non-integer-atomic.rs:30:5
    |
 LL |     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `Foo`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `Foo`
   --> $DIR/non-integer-atomic.rs:35:5
    |
 LL |     intrinsics::atomic_load::<_, { SeqCst }>(p);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `Foo`
+error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `Foo`
   --> $DIR/non-integer-atomic.rs:40:5
    |
 LL |     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `Foo`
+error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `Foo`
   --> $DIR/non-integer-atomic.rs:45:5
    |
 LL |     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `Foo`
+error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `Foo`
   --> $DIR/non-integer-atomic.rs:50:5
    |
 LL |     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `&dyn Fn()`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
   --> $DIR/non-integer-atomic.rs:55:5
    |
 LL |     intrinsics::atomic_load::<_, { SeqCst }>(p);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `&dyn Fn()`
+error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
   --> $DIR/non-integer-atomic.rs:60:5
    |
 LL |     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `&dyn Fn()`
+error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
   --> $DIR/non-integer-atomic.rs:65:5
    |
 LL |     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `&dyn Fn()`
+error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `&dyn Fn()`
   --> $DIR/non-integer-atomic.rs:70:5
    |
 LL |     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer type, found `[u8; 100]`
+error[E0511]: invalid monomorphization of `atomic_load` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
   --> $DIR/non-integer-atomic.rs:75:5
    |
 LL |     intrinsics::atomic_load::<_, { SeqCst }>(p);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer type, found `[u8; 100]`
+error[E0511]: invalid monomorphization of `atomic_store` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
   --> $DIR/non-integer-atomic.rs:80:5
    |
 LL |     intrinsics::atomic_store::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer type, found `[u8; 100]`
+error[E0511]: invalid monomorphization of `atomic_xchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
   --> $DIR/non-integer-atomic.rs:85:5
    |
 LL |     intrinsics::atomic_xchg::<_, { SeqCst }>(p, v);
    |     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer type, found `[u8; 100]`
+error[E0511]: invalid monomorphization of `atomic_cxchg` intrinsic: expected basic integer or pointer type, found `[u8; 100]`
   --> $DIR/non-integer-atomic.rs:90:5
    |
 LL |     intrinsics::atomic_cxchg::<_, { SeqCst }, { SeqCst }>(p, v, v);