Diffstat (limited to 'src/libcore/sync')
-rw-r--r--   src/libcore/sync/atomic.rs   64
1 file changed, 33 insertions, 31 deletions
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 889f182561b..8d55dc55ef2 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -348,7 +348,7 @@ impl AtomicBool {
     #[inline]
     #[stable(feature = "atomic_access", since = "1.15.0")]
     pub fn get_mut(&mut self) -> &mut bool {
-        // SAFETY: the mutable reference guarantees unique ownership
+        // SAFETY: the mutable reference guarantees unique ownership.
         unsafe { &mut *(self.v.get() as *mut bool) }
     }
@@ -399,7 +399,8 @@ impl AtomicBool {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn load(&self, order: Ordering) -> bool {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: any data races are prevented by atomic intrinsics and the raw
+        // pointer passed in is valid because we got it from a reference.
         unsafe { atomic_load(self.v.get(), order) != 0 }
     }
@@ -432,7 +433,8 @@ impl AtomicBool {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn store(&self, val: bool, order: Ordering) {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: any data races are prevented by atomic intrinsics and the raw
+        // pointer passed in is valid because we got it from a reference.
         unsafe {
             atomic_store(self.v.get(), val as u8, order);
         }
@@ -464,7 +466,7 @@ impl AtomicBool {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[cfg(target_has_atomic = "8")]
     pub fn swap(&self, val: bool, order: Ordering) -> bool {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
     }
@@ -560,7 +562,7 @@ impl AtomicBool {
         success: Ordering,
         failure: Ordering,
     ) -> Result<bool, bool> {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         match unsafe {
             atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
         } {
@@ -618,7 +620,7 @@ impl AtomicBool {
         success: Ordering,
         failure: Ordering,
     ) -> Result<bool, bool> {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         match unsafe {
             atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
         } {
@@ -665,7 +667,7 @@ impl AtomicBool {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[cfg(target_has_atomic = "8")]
     pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
     }
@@ -761,7 +763,7 @@ impl AtomicBool {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[cfg(target_has_atomic = "8")]
     pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
     }
@@ -803,7 +805,7 @@ impl AtomicBool {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[cfg(target_has_atomic = "8")]
     pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
     }
@@ -879,7 +881,7 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "atomic_access", since = "1.15.0")]
     pub fn get_mut(&mut self) -> &mut *mut T {
-        // SAFETY: the mutable reference guarantees unique ownership
+        // SAFETY: the mutable reference guarantees unique ownership.
         unsafe { &mut *self.p.get() }
     }
@@ -931,7 +933,7 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn load(&self, order: Ordering) -> *mut T {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_load(self.p.get() as *mut usize, order) as *mut T }
     }
@@ -966,7 +968,7 @@ impl<T> AtomicPtr<T> {
     #[inline]
     #[stable(feature = "rust1", since = "1.0.0")]
     pub fn store(&self, ptr: *mut T, order: Ordering) {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             atomic_store(self.p.get() as *mut usize, ptr as usize, order);
         }
@@ -1000,7 +1002,7 @@ impl<T> AtomicPtr<T> {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[cfg(target_has_atomic = "ptr")]
    pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_swap(self.p.get() as *mut usize, ptr as usize, order) as *mut T }
     }
@@ -1085,7 +1087,7 @@ impl<T> AtomicPtr<T> {
         success: Ordering,
         failure: Ordering,
     ) -> Result<*mut T, *mut T> {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             let res = atomic_compare_exchange(
                 self.p.get() as *mut usize,
@@ -1149,7 +1151,7 @@ impl<T> AtomicPtr<T> {
         success: Ordering,
         failure: Ordering,
     ) -> Result<*mut T, *mut T> {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             let res = atomic_compare_exchange_weak(
                 self.p.get() as *mut usize,
@@ -1303,7 +1305,7 @@ assert_eq!(some_var.load(Ordering::SeqCst), 5);
     #[inline]
     #[$stable_access]
     pub fn get_mut(&mut self) -> &mut $int_type {
-        // SAFETY: the mutable reference guarantees unique ownership
+        // SAFETY: the mutable reference guarantees unique ownership.
         unsafe { &mut *self.v.get() }
     }
 }
@@ -1358,7 +1360,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 5);
     #[inline]
     #[$stable]
     pub fn load(&self, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_load(self.v.get(), order) }
     }
 }
@@ -1393,7 +1395,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 10);
     #[inline]
     #[$stable]
     pub fn store(&self, val: $int_type, order: Ordering) {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_store(self.v.get(), val, order); }
     }
 }
@@ -1424,7 +1426,7 @@ assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
     #[$stable]
     #[$cfg_cas]
     pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_swap(self.v.get(), val, order) }
     }
 }
@@ -1527,7 +1529,7 @@ assert_eq!(some_var.load(Ordering::Relaxed), 10);
         new: $int_type,
         success: Ordering,
         failure: Ordering) -> Result<$int_type, $int_type> {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
     }
 }
@@ -1580,7 +1582,7 @@ loop {
         new: $int_type,
         success: Ordering,
         failure: Ordering) -> Result<$int_type, $int_type> {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe {
             atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
         }
@@ -1615,7 +1617,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10);
     #[$stable]
     #[$cfg_cas]
     pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_add(self.v.get(), val, order) }
     }
 }
@@ -1648,7 +1650,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 10);
     #[$stable]
     #[$cfg_cas]
     pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_sub(self.v.get(), val, order) }
     }
 }
@@ -1684,7 +1686,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
     #[$stable]
     #[$cfg_cas]
     pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_and(self.v.get(), val, order) }
     }
 }
@@ -1721,7 +1723,7 @@ assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
     #[$stable_nand]
     #[$cfg_cas]
     pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_nand(self.v.get(), val, order) }
     }
 }
@@ -1757,7 +1759,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
     #[$stable]
     #[$cfg_cas]
     pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_or(self.v.get(), val, order) }
     }
 }
@@ -1793,7 +1795,7 @@ assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
     #[$stable]
     #[$cfg_cas]
     pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { atomic_xor(self.v.get(), val, order) }
     }
 }
@@ -1905,7 +1907,7 @@ assert!(max_foo == 42);
                issue = "48655")]
     #[$cfg_cas]
     pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { $max_fn(self.v.get(), val, order) }
     }
 }
@@ -1958,7 +1960,7 @@ assert_eq!(min_foo, 12);
                issue = "48655")]
     #[$cfg_cas]
    pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
-        // SAFETY: data races are prevented by atomic intrinsics
+        // SAFETY: data races are prevented by atomic intrinsics.
         unsafe { $min_fn(self.v.get(), val, order) }
     }
 }
@@ -2553,7 +2555,7 @@ pub fn fence(order: Ordering) {
     // https://github.com/WebAssembly/tool-conventions/issues/59. We should
     // follow that discussion and implement a solution when one comes about!
     #[cfg(not(target_arch = "wasm32"))]
-    // SAFETY: using an atomic fence is safe
+    // SAFETY: using an atomic fence is safe.
     unsafe {
         match order {
             Acquire => intrinsics::atomic_fence_acq(),
@@ -2641,7 +2643,7 @@ pub fn fence(order: Ordering) {
 #[inline]
 #[stable(feature = "compiler_fences", since = "1.21.0")]
 pub fn compiler_fence(order: Ordering) {
-    // SAFETY: doesn't compile to machine code
+    // SAFETY: using an atomic fence is safe.
     unsafe {
         match order {
             Acquire => intrinsics::atomic_singlethreadfence_acq(),
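
Note (not part of the patch): the two kinds of SAFETY comment touched above describe two different invariants. `get_mut` needs no atomic operation because `&mut self` guarantees unique ownership, while the `&self` methods (`load`, `store`, `fetch_add`, ...) pass the atomic intrinsics a raw pointer that is valid because it came from a reference, and the intrinsics themselves prevent data races. A minimal sketch of both, using only the public `AtomicUsize` API:

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::Arc;
    use std::thread;

    fn main() {
        // Exclusive access: `&mut self` proves unique ownership, so `get_mut`
        // hands out a plain `&mut usize` with no atomic operation at all.
        let mut counter = AtomicUsize::new(0);
        *counter.get_mut() = 5;
        assert_eq!(*counter.get_mut(), 5);

        // Shared access: `load`/`fetch_add` take `&self`; the atomic intrinsics
        // prevent data races between concurrently running threads.
        let counter = Arc::new(AtomicUsize::new(0));
        let handles: Vec<_> = (0..4)
            .map(|_| {
                let counter = Arc::clone(&counter);
                thread::spawn(move || {
                    counter.fetch_add(1, Ordering::SeqCst);
                })
            })
            .collect();
        for handle in handles {
            handle.join().unwrap();
        }
        assert_eq!(counter.load(Ordering::SeqCst), 4);
    }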
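
Also illustrative rather than part of the patch: the `compare_exchange_weak` hunk at new line 1582 sits inside a doc example built around a retry loop, since the weak variant may fail spuriously. A self-contained sketch of that retry pattern with the public API:

    use std::sync::atomic::{AtomicUsize, Ordering};

    /// Atomically doubles `val`, retrying if `compare_exchange_weak` fails
    /// spuriously or another thread changed the value in the meantime.
    fn double(val: &AtomicUsize) -> usize {
        let mut old = val.load(Ordering::Relaxed);
        loop {
            let new = old * 2;
            match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
                Ok(prev) => return prev,     // stored `new`; report the value we replaced
                Err(actual) => old = actual, // lost a race (or spurious failure); retry
            }
        }
    }

    fn main() {
        let v = AtomicUsize::new(5);
        assert_eq!(double(&v), 5);
        assert_eq!(v.load(Ordering::SeqCst), 10);
    }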
