-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs  12
-rw-r--r--  tests/codegen/atomicptr.rs                   38
2 files changed, 46 insertions, 4 deletions
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 06d9be1869c..160f361b9b5 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1149,12 +1149,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         order: rustc_codegen_ssa::common::AtomicOrdering,
     ) -> &'ll Value {
         // The only atomic RMW operation that LLVM supports directly on pointers is exchange (xchg).
-        if self.val_ty(src) == self.type_ptr()
-            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg
-        {
+        let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
+            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
+        if requires_cast_to_int {
             src = self.ptrtoint(src, self.type_isize());
         }
-        unsafe {
+        let mut res = unsafe {
             llvm::LLVMBuildAtomicRMW(
                 self.llbuilder,
                 AtomicRmwBinOp::from_generic(op),
@@ -1163,7 +1163,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 AtomicOrdering::from_generic(order),
                 llvm::False, // SingleThreaded
             )
+        };
+        if requires_cast_to_int {
+            res = self.inttoptr(res, self.type_ptr());
         }
+        res
     }
 
     fn atomic_fence(
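
The builder change above means every pointer-typed atomic RMW except exchange
goes through a ptrtoint/inttoptr round-trip, and the integer result is now cast
back so callers still receive a pointer-typed value. A minimal user-level
sketch of both paths, using the unstable strict-provenance AtomicPtr methods
(the same feature gate the new test below enables); the function names are
illustrative only:

    #![feature(strict_provenance_atomic_ptr)]

    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

    // Takes the cast path: lowered as ptrtoint -> `atomicrmw or` -> inttoptr.
    // Sets the low tag bit and returns the previous pointer value.
    pub fn tag_low_bit(a: &AtomicPtr<u8>) -> *mut u8 {
        a.fetch_or(1, Relaxed)
    }

    // Needs no casts: lowered directly as `atomicrmw xchg ptr`, since LLVM
    // accepts pointer operands for exchange.
    pub fn replace(a: &AtomicPtr<u8>, p: *mut u8) -> *mut u8 {
        a.swap(p, Relaxed)
    }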
diff --git a/tests/codegen/atomicptr.rs b/tests/codegen/atomicptr.rs
new file mode 100644
index 00000000000..cbbd5615512
--- /dev/null
+++ b/tests/codegen/atomicptr.rs
@@ -0,0 +1,38 @@
+// LLVM does not support most atomic RMW operations on pointers, so inside codegen we lower those
+// to integer atomics, surrounded by casts to and from an integer type.
+// This test ensures that we do the round-trip correctly for AtomicPtr::fetch_byte_add, and also
+// ensures that we do not have such a round-trip for AtomicPtr::swap, because LLVM supports pointer
+// arguments to `atomicrmw xchg`.
+
+//@ compile-flags: -O -Cno-prepopulate-passes
+#![crate_type = "lib"]
+
+#![feature(strict_provenance)]
+#![feature(strict_provenance_atomic_ptr)]
+
+use std::sync::atomic::AtomicPtr;
+use std::sync::atomic::Ordering::Relaxed;
+use std::ptr::without_provenance_mut;
+
+// Portability hack so that we can say [[USIZE]] instead of i64/i32/i16 for usize.
+// CHECK: @helper([[USIZE:i[0-9]+]] noundef %_1)
+#[no_mangle]
+pub fn helper(_: usize) {}
+
+// CHECK-LABEL: @atomicptr_fetch_byte_add
+#[no_mangle]
+pub fn atomicptr_fetch_byte_add(a: &AtomicPtr<u8>, v: usize) -> *mut u8 {
+    // CHECK: %[[INTPTR:.*]] = ptrtoint ptr %{{.*}} to [[USIZE]]
+    // CHECK-NEXT: %[[RET:.*]] = atomicrmw add ptr %{{.*}}, [[USIZE]] %[[INTPTR]]
+    // CHECK-NEXT: inttoptr [[USIZE]] %[[RET]] to ptr
+    a.fetch_byte_add(v, Relaxed)
+}
+
+// CHECK-LABEL: @atomicptr_swap
+#[no_mangle]
+pub fn atomicptr_swap(a: &AtomicPtr<u8>, ptr: *mut u8) -> *mut u8 {
+    // CHECK-NOT: ptrtoint
+    // CHECK: atomicrmw xchg ptr %{{.*}}, ptr %{{.*}} monotonic
+    // CHECK-NOT: inttoptr
+    a.swap(ptr, Relaxed)
+}
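
As a usage note, the inttoptr cast on the result is what keeps fetch_byte_add
returning a pointer rather than an integer. A small self-contained example of
the semantics the test pins down (nightly-only, same feature gate as above):

    #![feature(strict_provenance_atomic_ptr)]

    use std::sync::atomic::{AtomicPtr, Ordering::Relaxed};

    fn main() {
        let mut buf = [0u8; 4];
        let base = buf.as_mut_ptr();
        let a = AtomicPtr::new(base);

        // Advances the stored pointer by 2 bytes; the old pointer comes back.
        let old = a.fetch_byte_add(2, Relaxed);
        assert_eq!(old, base);

        // Swaps the original pointer back in; the advanced one comes back.
        let advanced = a.swap(base, Relaxed);
        assert_eq!(advanced as usize, base as usize + 2);
    }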