about summary refs log tree commit diff
diff options
context:
space:
mode:
 src/libcore/sync/atomic.rs             | 74 ++++++++++++++++++++++++++++++++++
 src/libstd/lib.rs                      |  1 +
 src/libstd/sys/wasm/alloc.rs           |  4 ++--
 src/libstd/sys/wasm/condvar_atomics.rs |  2 +-
 src/libstd/sys/wasm/mutex_atomics.rs   |  4 ++--
 5 files changed, 80 insertions(+), 5 deletions(-)
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 820c1edf930..251d49db062 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -802,6 +802,43 @@ impl AtomicBool {
     pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
         unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
     }
+
+    /// Returns a mutable pointer to the underlying [`bool`].
+    ///
+    /// Doing non-atomic reads and writes on the resulting boolean can be a data race.
+    /// This method is mostly useful for FFI, where the function signature may use
+    /// `*mut bool` instead of `&AtomicBool`.
+    ///
+    /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
+    /// atomic types work with interior mutability. All modifications of an atomic change the value
+    /// through a shared reference, and can do so safely as long as they use atomic operations. Any
+    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
+    /// restriction: operations on it must be atomic.
+    ///
+    /// [`bool`]: ../../../std/primitive.bool.html
+    ///
+    /// # Examples
+    ///
+    /// ```ignore (extern-declaration)
+    /// # fn main() {
+    /// use std::sync::atomic::AtomicBool;
+    /// extern {
+    ///     fn my_atomic_op(arg: *mut bool);
+    /// }
+    ///
+    /// let atomic = AtomicBool::new(true);
+    /// unsafe {
+    ///     my_atomic_op(atomic.as_mut_ptr());
+    /// }
+    /// # }
+    /// ```
+    #[inline]
+    #[unstable(feature = "atomic_mut_ptr",
+           reason = "recently added",
+           issue = "66893")]
+    pub fn as_mut_ptr(&self) -> *mut bool {
+        self.v.get() as *mut bool
+    }
 }
 
 #[cfg(target_has_atomic_load_store = "ptr")]
@@ -1891,6 +1928,43 @@ assert_eq!(min_foo, 12);
                 }
             }
 
+            doc_comment! {
+                concat!("Returns a mutable pointer to the underlying integer.
+
+Doing non-atomic reads and writes on the resulting integer can be a data race.
+This method is mostly useful for FFI, where the function signature may use
+`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.
+
+Returning an `*mut` pointer from a shared reference to this atomic is safe because the
+atomic types work with interior mutability. All modifications of an atomic change the value
+through a shared reference, and can do so safely as long as they use atomic operations. Any
+use of the returned raw pointer requires an `unsafe` block and still has to uphold the same
+restriction: operations on it must be atomic.
+
+# Examples
+
+```ignore (extern-declaration)
+# fn main() {
+", $extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";
+
+extern {
+    fn my_atomic_op(arg: *mut ", stringify!($int_type), ");
+}
+
+let atomic = ", stringify!($atomic_type), "::new(1);
+unsafe {
+    my_atomic_op(atomic.as_mut_ptr());
+}
+# }
+```"),
+                #[inline]
+                #[unstable(feature = "atomic_mut_ptr",
+                       reason = "recently added",
+                       issue = "66893")]
+                pub fn as_mut_ptr(&self) -> *mut $int_type {
+                    self.v.get()
+                }
+            }
         }
     }
 }
diff --git a/src/libstd/lib.rs b/src/libstd/lib.rs
index acf53f7f003..1dbb0c6ec83 100644
--- a/src/libstd/lib.rs
+++ b/src/libstd/lib.rs
@@ -234,6 +234,7 @@
 #![feature(allocator_internals)]
 #![feature(allow_internal_unsafe)]
 #![feature(allow_internal_unstable)]
+#![feature(atomic_mut_ptr)]
 #![feature(arbitrary_self_types)]
 #![feature(array_error_internals)]
 #![feature(asm)]
diff --git a/src/libstd/sys/wasm/alloc.rs b/src/libstd/sys/wasm/alloc.rs
index c1af6ec1262..05e55334ac0 100644
--- a/src/libstd/sys/wasm/alloc.rs
+++ b/src/libstd/sys/wasm/alloc.rs
@@ -67,7 +67,7 @@ mod lock {
             //
             //     unsafe {
             //         let r = core::arch::wasm32::i32_atomic_wait(
-            //             &LOCKED as *const AtomicI32 as *mut i32,
+            //             LOCKED.as_mut_ptr(),
             //             1,  //     expected value
             //             -1, //     timeout
             //         );
@@ -143,7 +143,7 @@ mod lock {
             //
             //     unsafe {
             //         core::arch::wasm32::atomic_notify(
-            //             &LOCKED as *const AtomicI32 as *mut i32,
+            //             LOCKED.as_mut_ptr(),
             //             1, //     only one thread
             //         );
             //     }
diff --git a/src/libstd/sys/wasm/condvar_atomics.rs b/src/libstd/sys/wasm/condvar_atomics.rs
index 580d2121844..f452bbd3487 100644
--- a/src/libstd/sys/wasm/condvar_atomics.rs
+++ b/src/libstd/sys/wasm/condvar_atomics.rs
@@ -89,6 +89,6 @@ impl Condvar {
     #[inline]
     fn ptr(&self) -> *mut i32 {
         assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
-        &self.cnt as *const AtomicUsize as *mut i32
+        self.cnt.as_mut_ptr() as *mut i32
     }
 }
diff --git a/src/libstd/sys/wasm/mutex_atomics.rs b/src/libstd/sys/wasm/mutex_atomics.rs
index 0e4f3d80aa9..cddd584dd22 100644
--- a/src/libstd/sys/wasm/mutex_atomics.rs
+++ b/src/libstd/sys/wasm/mutex_atomics.rs
@@ -56,7 +56,7 @@ impl Mutex {
     #[inline]
     fn ptr(&self) -> *mut i32 {
         assert_eq!(mem::size_of::<usize>(), mem::size_of::<i32>());
-        &self.locked as *const AtomicUsize as *mut isize as *mut i32
+        self.locked.as_mut_ptr() as *mut i32
     }
 }
 
@@ -145,6 +145,6 @@ impl ReentrantMutex {
 
     #[inline]
     fn ptr(&self) -> *mut i32 {
-        &self.owner as *const AtomicU32 as *mut i32
+        self.owner.as_mut_ptr() as *mut i32
     }
 }