30 files changed, 100 insertions, 81 deletions
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 0a180b83355..7e90d77b8f2 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -158,6 +158,7 @@
 #![feature(rustc_allow_const_fn_unstable)]
 #![feature(rustc_attrs)]
 #![feature(staged_api)]
+#![feature(strict_provenance)]
 #![cfg_attr(test, feature(test))]
 #![feature(unboxed_closures)]
 #![feature(unsized_fn_params)]
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 3ddc9acb2e7..0b57c36247e 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -2115,13 +2115,12 @@ impl<T> Weak<T> {
     #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
     #[must_use]
     pub const fn new() -> Weak<T> {
-        Weak { ptr: unsafe { NonNull::new_unchecked(usize::MAX as *mut RcBox<T>) } }
+        Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<RcBox<T>>(usize::MAX)) } }
     }
 }
 
 pub(crate) fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool {
-    let address = ptr as *mut () as usize;
-    address == usize::MAX
+    (ptr as *mut ()).addr() == usize::MAX
 }
 
 /// Helper type to allow accessing the reference counts without
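The `rc.rs` change above shows the pattern that recurs through the whole diff: `ptr::invalid_mut(addr)` replaces `addr as *mut T` for conjuring a pointer from a bare address, and `ptr.addr()` replaces `ptr as usize` for inspecting one. As a stand-alone illustration, here is a minimal sketch of the dangling-`Weak` sentinel round trip; the helper names are ours, and stable casts merely approximate the unstable strict-provenance methods:

    // Hedged sketch, not libstd code: stable-Rust stand-ins for the
    // strict-provenance helpers used throughout this diff.
    fn invalid_mut<T>(addr: usize) -> *mut T {
        addr as *mut T // the real `ptr::invalid_mut` yields a pointer with no provenance
    }

    fn is_dangling<T>(ptr: *mut T) -> bool {
        (ptr as usize) == usize::MAX // stand-in for `(ptr as *mut ()).addr() == usize::MAX`
    }

    fn main() {
        // `Weak::new` stores an all-ones address as a "never allocated" sentinel.
        let sentinel: *mut u32 = invalid_mut(usize::MAX);
        assert!(is_dangling(sentinel));
    }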
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index f52871c73d9..7c892f03bfb 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -1044,7 +1044,7 @@ where
 impl<T> Drop for MergeHole<T> {
     fn drop(&mut self) {
         // `T` is not a zero-sized type, so it's okay to divide by its size.
-        let len = (self.end as usize - self.start as usize) / mem::size_of::<T>();
+        let len = (self.end.addr() - self.start.addr()) / mem::size_of::<T>();
         unsafe {
             ptr::copy_nonoverlapping(self.start, self.dest, len);
         }
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index e2b9890850b..f8b4d46ac10 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -1746,7 +1746,7 @@ impl<T> Weak<T> {
     #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
     #[must_use]
     pub const fn new() -> Weak<T> {
-        Weak { ptr: unsafe { NonNull::new_unchecked(usize::MAX as *mut ArcInner<T>) } }
+        Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) } }
     }
 }
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index f17b8d71b3a..cc6dfb0e330 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -159,7 +159,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
         let exact = if mem::size_of::<T>() == 0 {
-            (self.end as usize).wrapping_sub(self.ptr as usize)
+            self.end.addr().wrapping_sub(self.ptr.addr())
         } else {
             unsafe { self.end.offset_from(self.ptr) as usize }
         };
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
index ea639268652..0639d6eed62 100644
--- a/library/core/src/alloc/layout.rs
+++ b/library/core/src/alloc/layout.rs
@@ -194,7 +194,7 @@ impl Layout {
     #[inline]
     pub const fn dangling(&self) -> NonNull<u8> {
         // SAFETY: align is guaranteed to be non-zero
-        unsafe { NonNull::new_unchecked(self.align() as *mut u8) }
+        unsafe { NonNull::new_unchecked(crate::ptr::invalid_mut::<u8>(self.align())) }
     }
 
     /// Creates a layout describing the record that can hold a value
diff --git a/library/core/src/fmt/mod.rs b/library/core/src/fmt/mod.rs
index 84cf1753f86..0e2e869a920 100644
--- a/library/core/src/fmt/mod.rs
+++ b/library/core/src/fmt/mod.rs
@@ -352,7 +352,11 @@ impl<'a> ArgumentV1<'a> {
     }
 
     fn as_usize(&self) -> Option<usize> {
-        if self.formatter as usize == USIZE_MARKER as usize {
+        // We are type punning a bit here: USIZE_MARKER only takes an &usize but
+        // formatter takes an &Opaque. Rust understandably doesn't think we should compare
+        // the function pointers if they don't have the same signature, so we cast to
+        // pointers to convince it that we know what we're doing.
+        if self.formatter as *mut u8 == USIZE_MARKER as *mut u8 {
             // SAFETY: The `formatter` field is only set to USIZE_MARKER if
             // the value is a usize, so this is safe
             Some(unsafe { *(self.value as *const _ as *const usize) })
@@ -2246,7 +2250,7 @@ impl<T: ?Sized> Pointer for *const T {
         }
         f.flags |= 1 << (FlagV1::Alternate as u32);
 
-        let ret = LowerHex::fmt(&(ptr as usize), f);
+        let ret = LowerHex::fmt(&(ptr.addr()), f);
 
         f.width = old_width;
         f.flags = old_flags;
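The new comment in `fmt/mod.rs` is worth a self-contained illustration: two function items with different signatures cannot be compared with `==` directly, so both sides are cast to a common raw-pointer type first. A small sketch under that assumption; the marker functions below are invented for illustration, not libcore's:

    use std::fmt;

    // Two markers with different argument types, mimicking USIZE_MARKER vs.
    // the `formatter` field's signature.
    fn marker_usize(_: &usize, _: &mut fmt::Formatter<'_>) -> fmt::Result { Ok(()) }
    fn marker_i32(_: &i32, _: &mut fmt::Formatter<'_>) -> fmt::Result { Ok(()) }

    fn main() {
        // `marker_usize == marker_i32` would not type-check because the
        // signatures differ; casting both sides to `*mut u8` (as the diff
        // does) turns it into a plain address comparison.
        let stored = marker_usize as *mut u8;
        assert!(stored == marker_usize as *mut u8);
        let _other = marker_i32 as *mut u8; // comparable to `stored` once cast
    }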
diff --git a/library/core/src/hash/mod.rs b/library/core/src/hash/mod.rs
index 53de8b42c05..45c9df0c930 100644
--- a/library/core/src/hash/mod.rs
+++ b/library/core/src/hash/mod.rs
@@ -793,7 +793,7 @@ mod impls {
         #[inline]
         fn hash<H: Hasher>(&self, state: &mut H) {
             let (address, metadata) = self.to_raw_parts();
-            state.write_usize(address as usize);
+            state.write_usize(address.addr());
             metadata.hash(state);
         }
     }
@@ -803,7 +803,7 @@ mod impls {
         #[inline]
         fn hash<H: Hasher>(&self, state: &mut H) {
             let (address, metadata) = self.to_raw_parts();
-            state.write_usize(address as usize);
+            state.write_usize(address.addr());
             metadata.hash(state);
         }
     }
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 129402ad23a..8ad4317c145 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -1972,15 +1972,15 @@ extern "rust-intrinsic" {
 /// Checks whether `ptr` is properly aligned with respect to
 /// `align_of::<T>()`.
 pub(crate) fn is_aligned_and_not_null<T>(ptr: *const T) -> bool {
-    !ptr.is_null() && ptr as usize % mem::align_of::<T>() == 0
+    !ptr.is_null() && ptr.addr() % mem::align_of::<T>() == 0
 }
 
 /// Checks whether the regions of memory starting at `src` and `dst` of size
 /// `count * size_of::<T>()` do *not* overlap.
 #[cfg(debug_assertions)]
 pub(crate) fn is_nonoverlapping<T>(src: *const T, dst: *const T, count: usize) -> bool {
-    let src_usize = src as usize;
-    let dst_usize = dst as usize;
+    let src_usize = src.addr();
+    let dst_usize = dst.addr();
     let size = mem::size_of::<T>().checked_mul(count).unwrap();
     let diff = if src_usize > dst_usize { src_usize - dst_usize } else { dst_usize - src_usize };
     // If the absolute distance between the ptrs is at least as big as the size of the buffer,
diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs
index a698aec51ca..c1b19895f00 100644
--- a/library/core/src/ptr/non_null.rs
+++ b/library/core/src/ptr/non_null.rs
@@ -90,7 +90,7 @@ impl<T: Sized> NonNull<T> {
         // to a *mut T. Therefore, `ptr` is not null and the conditions for
         // calling new_unchecked() are respected.
         unsafe {
-            let ptr = mem::align_of::<T>() as *mut T;
+            let ptr = crate::ptr::invalid_mut::<T>(mem::align_of::<T>());
             NonNull::new_unchecked(ptr)
         }
     }
@@ -469,7 +469,7 @@ impl<T> NonNull<[T]> {
     /// use std::ptr::NonNull;
     ///
     /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
-    /// assert_eq!(slice.as_non_null_ptr(), NonNull::new(1 as *mut i8).unwrap());
+    /// assert_eq!(slice.as_non_null_ptr(), NonNull::<i8>::dangling());
     /// ```
     #[inline]
     #[must_use]
@@ -489,7 +489,7 @@ impl<T> NonNull<[T]> {
     /// use std::ptr::NonNull;
     ///
     /// let slice: NonNull<[i8]> = NonNull::slice_from_raw_parts(NonNull::dangling(), 3);
-    /// assert_eq!(slice.as_mut_ptr(), 1 as *mut i8);
+    /// assert_eq!(slice.as_mut_ptr(), NonNull::<i8>::dangling().as_ptr());
     /// ```
     #[inline]
     #[must_use]
diff --git a/library/core/src/ptr/unique.rs b/library/core/src/ptr/unique.rs
index cff68f64f78..29398cbeb23 100644
--- a/library/core/src/ptr/unique.rs
+++ b/library/core/src/ptr/unique.rs
@@ -73,7 +73,7 @@ impl<T: Sized> Unique<T> {
     pub const fn dangling() -> Self {
         // SAFETY: mem::align_of() returns a valid, non-null pointer. The
         // conditions to call new_unchecked() are thus respected.
-        unsafe { Unique::new_unchecked(mem::align_of::<T>() as *mut T) }
+        unsafe { Unique::new_unchecked(crate::ptr::invalid_mut::<T>(mem::align_of::<T>())) }
     }
 }
diff --git a/library/core/src/slice/ascii.rs b/library/core/src/slice/ascii.rs
index c02a6f2d78c..63d761d3c02 100644
--- a/library/core/src/slice/ascii.rs
+++ b/library/core/src/slice/ascii.rs
@@ -294,7 +294,7 @@ fn is_ascii(s: &[u8]) -> bool {
     // Paranoia check about alignment, since we're about to do a bunch of
     // unaligned loads. In practice this should be impossible barring a bug in
     // `align_offset` though.
-    debug_assert_eq!((word_ptr as usize) % mem::align_of::<usize>(), 0);
+    debug_assert_eq!((word_ptr.addr()) % mem::align_of::<usize>(), 0);
 
     // Read subsequent words until the last aligned word, excluding the last
     // aligned word by itself to be done in tail check later, to ensure that
@@ -302,9 +302,9 @@ fn is_ascii(s: &[u8]) -> bool {
     while byte_pos < len - USIZE_SIZE {
         debug_assert!(
             // Sanity check that the read is in bounds
-            (word_ptr as usize + USIZE_SIZE) <= (start.wrapping_add(len) as usize) &&
+            (word_ptr.addr() + USIZE_SIZE) <= (start.wrapping_add(len).addr()) &&
             // And that our assumptions about `byte_pos` hold.
-            (word_ptr as usize) - (start as usize) == byte_pos
+            (word_ptr.addr()) - (start.addr()) == byte_pos
         );
 
         // SAFETY: We know `word_ptr` is properly aligned (because of
diff --git a/library/core/src/slice/iter/macros.rs b/library/core/src/slice/iter/macros.rs
index cf15756868e..96ead49dd6a 100644
--- a/library/core/src/slice/iter/macros.rs
+++ b/library/core/src/slice/iter/macros.rs
@@ -20,13 +20,13 @@ macro_rules! len {
         if size == 0 {
             // This _cannot_ use `unchecked_sub` because we depend on wrapping
             // to represent the length of long ZST slice iterators.
-            ($self.end as usize).wrapping_sub(start.as_ptr() as usize)
+            ($self.end.addr()).wrapping_sub(start.as_ptr().addr())
         } else {
             // We know that `start <= end`, so can do better than `offset_from`,
             // which needs to deal in signed. By setting appropriate flags here
             // we can tell LLVM this, which helps it remove bounds checks.
             // SAFETY: By the type invariant, `start <= end`
-            let diff = unsafe { unchecked_sub($self.end as usize, start.as_ptr() as usize) };
+            let diff = unsafe { unchecked_sub($self.end.addr(), start.as_ptr().addr()) };
             // By also telling LLVM that the pointers are apart by an exact
             // multiple of the type size, it can optimize `len() == 0` down to
             // `start == end` instead of `(end - start) < size`.
diff --git a/library/core/src/slice/sort.rs b/library/core/src/slice/sort.rs
index 2ba0e5320d7..5cf08b5740e 100644
--- a/library/core/src/slice/sort.rs
+++ b/library/core/src/slice/sort.rs
@@ -269,7 +269,7 @@ where
     // Returns the number of elements between pointers `l` (inclusive) and `r` (exclusive).
     fn width<T>(l: *mut T, r: *mut T) -> usize {
         assert!(mem::size_of::<T>() > 0);
-        (r as usize - l as usize) / mem::size_of::<T>()
+        (r.addr() - l.addr()) / mem::size_of::<T>()
     }
 
     loop {
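The `dangling()` implementations above all rely on the same trick: the alignment of `T` is a non-null, well-aligned address, so it makes a pointer that is valid to construct but never to dereference. A sketch, again approximating `ptr::invalid_mut` with a stable cast:

    use std::mem;
    use std::ptr::NonNull;

    // Sketch of the `dangling()` pattern from non_null.rs / unique.rs:
    // a well-aligned, non-null, but invalid pointer made from an address alone.
    fn dangling<T>() -> NonNull<T> {
        let ptr = mem::align_of::<T>() as *mut T; // stand-in for `ptr::invalid_mut`
        // SAFETY: align_of() is never zero, so `ptr` is non-null.
        unsafe { NonNull::new_unchecked(ptr) }
    }

    fn main() {
        let d = dangling::<u64>();
        assert_eq!(d.as_ptr() as usize, mem::align_of::<u64>());
    }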
diff --git a/library/std/src/backtrace.rs b/library/std/src/backtrace.rs
index 94e6070c0f7..fded482095a 100644
--- a/library/std/src/backtrace.rs
+++ b/library/std/src/backtrace.rs
@@ -293,7 +293,7 @@ impl Backtrace {
         if !Backtrace::enabled() {
             return Backtrace { inner: Inner::Disabled };
         }
-        Backtrace::create(Backtrace::capture as usize)
+        Backtrace::create((Backtrace::capture as *mut ()).addr())
     }
 
     /// Forcibly captures a full backtrace, regardless of environment variable
@@ -308,7 +308,7 @@ impl Backtrace {
     /// parts of code.
     #[inline(never)] // want to make sure there's a frame here to remove
     pub fn force_capture() -> Backtrace {
-        Backtrace::create(Backtrace::force_capture as usize)
+        Backtrace::create((Backtrace::force_capture as *mut ()).addr())
     }
 
     /// Forcibly captures a disabled backtrace, regardless of environment
@@ -330,7 +330,7 @@
                 frame: RawFrame::Actual(frame.clone()),
                 symbols: Vec::new(),
             });
-            if frame.symbol_address() as usize == ip && actual_start.is_none() {
+            if frame.symbol_address().addr() == ip && actual_start.is_none() {
                 actual_start = Some(frames.len());
             }
             true
@@ -493,7 +493,7 @@ impl RawFrame {
         match self {
             RawFrame::Actual(frame) => frame.ip(),
             #[cfg(test)]
-            RawFrame::Fake => 1 as *mut c_void,
+            RawFrame::Fake => ptr::invalid_mut(1),
         }
     }
 }
diff --git a/library/std/src/io/error/repr_bitpacked.rs b/library/std/src/io/error/repr_bitpacked.rs
index 208d5a80c5a..7cc1c701064 100644
--- a/library/std/src/io/error/repr_bitpacked.rs
+++ b/library/std/src/io/error/repr_bitpacked.rs
@@ -106,7 +106,7 @@ use super::{Custom, ErrorData, ErrorKind, SimpleMessage};
 use alloc::boxed::Box;
 use core::marker::PhantomData;
 use core::mem::{align_of, size_of};
-use core::ptr::NonNull;
+use core::ptr::{self, NonNull};
 
 // The 2 least-significant bits are used as tag.
 const TAG_MASK: usize = 0b11;
@@ -136,7 +136,7 @@ impl Repr {
         let p = Box::into_raw(b).cast::<u8>();
         // Should only be possible if an allocator handed out a pointer with
         // wrong alignment.
-        debug_assert_eq!((p as usize & TAG_MASK), 0);
+        debug_assert_eq!((p.addr() & TAG_MASK), 0);
         // Note: We know `TAG_CUSTOM <= size_of::<Custom>()` (static_assert at
         // end of file), and both the start and end of the expression must be
         // valid without address space wraparound due to `Box`'s semantics.
@@ -166,7 +166,7 @@ impl Repr {
     pub(super) fn new_os(code: i32) -> Self {
         let utagged = ((code as usize) << 32) | TAG_OS;
         // Safety: `TAG_OS` is not zero, so the result of the `|` is not 0.
-        let res = Self(unsafe { NonNull::new_unchecked(utagged as *mut ()) }, PhantomData);
+        let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
         // quickly smoke-check we encoded the right thing (This generally will
         // only run in libstd's tests, unless the user uses -Zbuild-std)
         debug_assert!(
@@ -180,7 +180,7 @@ impl Repr {
     pub(super) fn new_simple(kind: ErrorKind) -> Self {
         let utagged = ((kind as usize) << 32) | TAG_SIMPLE;
         // Safety: `TAG_SIMPLE` is not zero, so the result of the `|` is not 0.
-        let res = Self(unsafe { NonNull::new_unchecked(utagged as *mut ()) }, PhantomData);
+        let res = Self(unsafe { NonNull::new_unchecked(ptr::invalid_mut(utagged)) }, PhantomData);
         // quickly smoke-check we encoded the right thing (This generally will
         // only run in libstd's tests, unless the user uses -Zbuild-std)
         debug_assert!(
@@ -238,7 +238,7 @@ unsafe fn decode_repr<C, F>(ptr: NonNull<()>, make_custom: F) -> ErrorData<C>
 where
     F: FnOnce(*mut Custom) -> C,
 {
-    let bits = ptr.as_ptr() as usize;
+    let bits = ptr.as_ptr().addr();
     match bits & TAG_MASK {
         TAG_OS => {
             let code = ((bits as i64) >> 32) as i32;
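`repr_bitpacked.rs` packs an OS error code (or `ErrorKind`) into the high bits of a pointer-sized word and a 2-bit tag into the low bits, then carries the whole thing as a `NonNull<()>`. A 64-bit-only sketch of that encoding; the `TAG_OS` value here is chosen for illustration and the helper names are ours, not the file's:

    // Sketch of the bit-packing scheme above (assumes 64-bit usize).
    const TAG_MASK: usize = 0b11;
    const TAG_OS: usize = 0b01; // illustrative; the real constants live in repr_bitpacked.rs

    fn encode_os(code: i32) -> *mut () {
        // 32-bit code in the high half, tag in the low two bits.
        (((code as usize) << 32) | TAG_OS) as *mut ()
    }

    fn decode(ptr: *mut ()) -> Option<i32> {
        let bits = ptr as usize; // stand-in for `ptr.addr()`
        if bits & TAG_MASK == TAG_OS {
            // Arithmetic shift recovers negative codes too.
            Some(((bits as i64) >> 32) as i32)
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(decode(encode_os(-17)), Some(-17));
    }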
diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs
index a464f2d4c74..133ced5f26c 100644
--- a/library/std/src/lib.rs
+++ b/library/std/src/lib.rs
@@ -275,6 +275,7 @@
 #![feature(extend_one)]
 #![feature(float_minimum_maximum)]
 #![feature(format_args_nl)]
+#![feature(strict_provenance)]
 #![feature(get_mut_unchecked)]
 #![feature(hashmap_internals)]
 #![feature(int_error_internals)]
diff --git a/library/std/src/os/windows/io/handle.rs b/library/std/src/os/windows/io/handle.rs
index 120af9f99dd..ee30cc8be6b 100644
--- a/library/std/src/os/windows/io/handle.rs
+++ b/library/std/src/os/windows/io/handle.rs
@@ -9,6 +9,7 @@ use crate::fs;
 use crate::io;
 use crate::marker::PhantomData;
 use crate::mem::forget;
+use crate::ptr;
 use crate::sys::c;
 use crate::sys::cvt;
 use crate::sys_common::{AsInner, FromInner, IntoInner};
@@ -182,7 +183,7 @@ impl OwnedHandle {
             return unsafe { Ok(Self::from_raw_handle(handle)) };
         }
 
-        let mut ret = 0 as c::HANDLE;
+        let mut ret = ptr::null_mut();
         cvt(unsafe {
             let cur_proc = c::GetCurrentProcess();
             c::DuplicateHandle(
diff --git a/library/std/src/os/windows/io/socket.rs b/library/std/src/os/windows/io/socket.rs
index a6b979cc22b..db93cd15d4a 100644
--- a/library/std/src/os/windows/io/socket.rs
+++ b/library/std/src/os/windows/io/socket.rs
@@ -129,6 +129,7 @@ impl OwnedSocket {
         }
     }
 
+    // FIXME(strict_provenance_magic): we defined RawSocket to be a u64 ;-;
     #[cfg(not(target_vendor = "uwp"))]
     pub(crate) fn set_no_inherit(&self) -> io::Result<()> {
         cvt(unsafe {
diff --git a/library/std/src/path.rs b/library/std/src/path.rs
index bcf5c9328b7..8ecea8ce07f 100644
--- a/library/std/src/path.rs
+++ b/library/std/src/path.rs
@@ -1449,8 +1449,8 @@ impl PathBuf {
         };
 
         // truncate until right after the file stem
-        let end_file_stem = file_stem[file_stem.len()..].as_ptr() as usize;
-        let start = os_str_as_u8_slice(&self.inner).as_ptr() as usize;
+        let end_file_stem = file_stem[file_stem.len()..].as_ptr().addr();
+        let start = os_str_as_u8_slice(&self.inner).as_ptr().addr();
         let v = self.as_mut_vec();
         v.truncate(end_file_stem.wrapping_sub(start));
diff --git a/library/std/src/sync/once.rs b/library/std/src/sync/once.rs
index 511de863dc5..d2dd4c075d2 100644
--- a/library/std/src/sync/once.rs
+++ b/library/std/src/sync/once.rs
@@ -91,9 +91,12 @@ use crate::cell::Cell;
 use crate::fmt;
 use crate::marker;
 use crate::panic::{RefUnwindSafe, UnwindSafe};
-use crate::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
+use crate::ptr;
+use crate::sync::atomic::{AtomicBool, AtomicPtr, Ordering};
 use crate::thread::{self, Thread};
 
+type Masked = ();
+
 /// A synchronization primitive which can be used to run a one-time global
 /// initialization. Useful for one-time initialization for FFI or related
 /// functionality. This type can only be constructed with [`Once::new()`].
@@ -113,7 +116,7 @@ use crate::thread::{self, Thread};
 pub struct Once {
     // `state_and_queue` is actually a pointer to a `Waiter` with extra state
     // bits, so we add the `PhantomData` appropriately.
-    state_and_queue: AtomicUsize,
+    state_and_queue: AtomicPtr<Masked>,
     _marker: marker::PhantomData<*const Waiter>,
 }
 
@@ -136,7 +139,7 @@ impl RefUnwindSafe for Once {}
 #[derive(Debug)]
 pub struct OnceState {
     poisoned: bool,
-    set_state_on_drop_to: Cell<usize>,
+    set_state_on_drop_to: Cell<*mut Masked>,
 }
 
 /// Initialization value for static [`Once`] values.
@@ -184,8 +187,8 @@ struct Waiter {
 // Every node is a struct on the stack of a waiting thread.
 // Will wake up the waiters when it gets dropped, i.e. also on panic.
 struct WaiterQueue<'a> {
-    state_and_queue: &'a AtomicUsize,
-    set_state_on_drop_to: usize,
+    state_and_queue: &'a AtomicPtr<Masked>,
+    set_state_on_drop_to: *mut Masked,
 }
 
 impl Once {
@@ -195,7 +198,10 @@ impl Once {
     #[rustc_const_stable(feature = "const_once_new", since = "1.32.0")]
     #[must_use]
     pub const fn new() -> Once {
-        Once { state_and_queue: AtomicUsize::new(INCOMPLETE), _marker: marker::PhantomData }
+        Once {
+            state_and_queue: AtomicPtr::new(ptr::invalid_mut(INCOMPLETE)),
+            _marker: marker::PhantomData,
+        }
     }
 
     /// Performs an initialization routine once and only once. The given closure
@@ -376,7 +382,7 @@ impl Once {
         // operations visible to us, and, this being a fast path, weaker
         // ordering helps with performance. This `Acquire` synchronizes with
        // `Release` operations on the slow path.
-        self.state_and_queue.load(Ordering::Acquire) == COMPLETE
+        self.state_and_queue.load(Ordering::Acquire).addr() == COMPLETE
     }
 
     // This is a non-generic function to reduce the monomorphization cost of
@@ -395,7 +401,7 @@ impl Once {
     fn call_inner(&self, ignore_poisoning: bool, init: &mut dyn FnMut(&OnceState)) {
         let mut state_and_queue = self.state_and_queue.load(Ordering::Acquire);
         loop {
-            match state_and_queue {
+            match state_and_queue.addr() {
                 COMPLETE => break,
                 POISONED if !ignore_poisoning => {
                     // Panic to propagate the poison.
@@ -405,7 +411,7 @@ impl Once {
                     // Try to register this thread as the one RUNNING.
                     let exchange_result = self.state_and_queue.compare_exchange(
                         state_and_queue,
-                        RUNNING,
+                        ptr::invalid_mut(RUNNING),
                         Ordering::Acquire,
                         Ordering::Acquire,
                     );
@@ -417,13 +423,13 @@ impl Once {
                     // wake them up on drop.
                     let mut waiter_queue = WaiterQueue {
                         state_and_queue: &self.state_and_queue,
-                        set_state_on_drop_to: POISONED,
+                        set_state_on_drop_to: ptr::invalid_mut(POISONED),
                     };
                     // Run the initialization function, letting it know if we're
                     // poisoned or not.
                     let init_state = OnceState {
-                        poisoned: state_and_queue == POISONED,
-                        set_state_on_drop_to: Cell::new(COMPLETE),
+                        poisoned: state_and_queue.addr() == POISONED,
+                        set_state_on_drop_to: Cell::new(ptr::invalid_mut(COMPLETE)),
                     };
                     init(&init_state);
                     waiter_queue.set_state_on_drop_to = init_state.set_state_on_drop_to.get();
@@ -432,7 +438,7 @@ impl Once {
                 _ => {
                     // All other values must be RUNNING with possibly a
                     // pointer to the waiter queue in the more significant bits.
-                    assert!(state_and_queue & STATE_MASK == RUNNING);
+                    assert!(state_and_queue.addr() & STATE_MASK == RUNNING);
                     wait(&self.state_and_queue, state_and_queue);
                     state_and_queue = self.state_and_queue.load(Ordering::Acquire);
                 }
@@ -441,13 +447,13 @@
     }
 }
 
-fn wait(state_and_queue: &AtomicUsize, mut current_state: usize) {
+fn wait(state_and_queue: &AtomicPtr<Masked>, mut current_state: *mut Masked) {
     // Note: the following code was carefully written to avoid creating a
     // mutable reference to `node` that gets aliased.
     loop {
         // Don't queue this thread if the status is no longer running,
         // otherwise we will not be woken up.
-        if current_state & STATE_MASK != RUNNING {
+        if current_state.addr() & STATE_MASK != RUNNING {
             return;
         }
 
@@ -455,15 +461,15 @@
         let node = Waiter {
             thread: Cell::new(Some(thread::current())),
             signaled: AtomicBool::new(false),
-            next: (current_state & !STATE_MASK) as *const Waiter,
+            next: current_state.with_addr(current_state.addr() & !STATE_MASK) as *const Waiter,
         };
-        let me = &node as *const Waiter as usize;
+        let me = &node as *const Waiter as *const Masked as *mut Masked;
 
         // Try to slide in the node at the head of the linked list, making sure
         // that another thread didn't just replace the head of the linked list.
         let exchange_result = state_and_queue.compare_exchange(
             current_state,
-            me | RUNNING,
+            me.with_addr(me.addr() | RUNNING),
             Ordering::Release,
             Ordering::Relaxed,
         );
@@ -502,7 +508,7 @@ impl Drop for WaiterQueue<'_> {
             self.state_and_queue.swap(self.set_state_on_drop_to, Ordering::AcqRel);
 
         // We should only ever see an old state which was RUNNING.
-        assert_eq!(state_and_queue & STATE_MASK, RUNNING);
+        assert_eq!(state_and_queue.addr() & STATE_MASK, RUNNING);
 
         // Walk the entire linked list of waiters and wake them up (in lifo
        // order, last to register is first to wake up).
@@ -511,7 +517,8 @@
            // free `node` if there happens to be has a spurious wakeup.
            // So we have to take out the `thread` field and copy the pointer to
            // `next` first.
-            let mut queue = (state_and_queue & !STATE_MASK) as *const Waiter;
+            let mut queue =
+                state_and_queue.with_addr(state_and_queue.addr() & !STATE_MASK) as *const Waiter;
             while !queue.is_null() {
                 let next = (*queue).next;
                 let thread = (*queue).thread.take().unwrap();
@@ -568,6 +575,6 @@ impl OnceState {
     /// Poison the associated [`Once`] without explicitly panicking.
     // NOTE: This is currently only exposed for the `lazy` module
     pub(crate) fn poison(&self) {
-        self.set_state_on_drop_to.set(POISONED);
+        self.set_state_on_drop_to.set(ptr::invalid_mut(POISONED));
     }
 }
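The `Once` rewrite above keeps the same state machine but stores it in an `AtomicPtr<Masked>` rather than an `AtomicUsize`, so the waiter-queue pointer keeps its provenance and only the low two tag bits are manipulated as an address. A compressed sketch of the claim-the-RUNNING-state step, with stable casts standing in for `addr`/`invalid_mut`; the constant values are illustrative, not necessarily the real ones:

    use std::sync::atomic::{AtomicPtr, Ordering};

    // Illustrative tag values packed into the low bits of the state pointer.
    const INCOMPLETE: usize = 0x0;
    const RUNNING: usize = 0x2;
    const STATE_MASK: usize = 0x3;

    fn main() {
        let state: AtomicPtr<()> = AtomicPtr::new(INCOMPLETE as *mut ());
        // Claim the RUNNING state, as call_inner() does via compare_exchange.
        state
            .compare_exchange(
                INCOMPLETE as *mut (),
                RUNNING as *mut (),
                Ordering::Acquire,
                Ordering::Acquire,
            )
            .unwrap();
        // Only the low tag bits are inspected; the rest would hold the queue pointer.
        assert_eq!(state.load(Ordering::Acquire) as usize & STATE_MASK, RUNNING);
    }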
diff --git a/library/std/src/sys/windows/alloc.rs b/library/std/src/sys/windows/alloc.rs
index 2fe71f9f28d..fdc81cdea7d 100644
--- a/library/std/src/sys/windows/alloc.rs
+++ b/library/std/src/sys/windows/alloc.rs
@@ -159,7 +159,7 @@ unsafe fn allocate(layout: Layout, zeroed: bool) -> *mut u8 {
 
         // Create a correctly aligned pointer offset from the start of the allocated block,
         // and write a header before it.
-        let offset = layout.align() - (ptr as usize & (layout.align() - 1));
+        let offset = layout.align() - (ptr.addr() & (layout.align() - 1));
         // SAFETY: `MIN_ALIGN` <= `offset` <= `layout.align()` and the size of the allocated
         // block is `layout.align() + layout.size()`. `aligned` will thus be a correctly aligned
         // pointer inside the allocated block with at least `layout.size()` bytes after it and at
diff --git a/library/std/src/sys/windows/c.rs b/library/std/src/sys/windows/c.rs
index 9b61b2476d5..0edf43e5d9d 100644
--- a/library/std/src/sys/windows/c.rs
+++ b/library/std/src/sys/windows/c.rs
@@ -173,7 +173,7 @@ pub const PROGRESS_CONTINUE: DWORD = 0;
 
 pub const E_NOTIMPL: HRESULT = 0x80004001u32 as HRESULT;
 
-pub const INVALID_HANDLE_VALUE: HANDLE = !0 as HANDLE;
+pub const INVALID_HANDLE_VALUE: HANDLE = ptr::invalid_mut(!0);
 
 pub const FACILITY_NT_BIT: DWORD = 0x1000_0000;
diff --git a/library/std/src/sys/windows/compat.rs b/library/std/src/sys/windows/compat.rs
index cbd3366b189..a914a3bcc12 100644
--- a/library/std/src/sys/windows/compat.rs
+++ b/library/std/src/sys/windows/compat.rs
@@ -88,7 +88,7 @@ macro_rules! compat_fn {
             let symbol_name: *const u8 = concat!(stringify!($symbol), "\0").as_ptr();
             let module_handle = $crate::sys::c::GetModuleHandleA(module_name as *const i8);
             if !module_handle.is_null() {
-                match $crate::sys::c::GetProcAddress(module_handle, symbol_name as *const i8) as usize {
+                match $crate::sys::c::GetProcAddress(module_handle, symbol_name as *const i8).addr() {
                     0 => {}
                     n => {
                         PTR = Some(mem::transmute::<usize, F>(n));
diff --git a/library/std/src/sys/windows/fs.rs b/library/std/src/sys/windows/fs.rs
index d6c40a15329..95903899297 100644
--- a/library/std/src/sys/windows/fs.rs
+++ b/library/std/src/sys/windows/fs.rs
@@ -57,6 +57,9 @@ pub struct DirEntry {
     data: c::WIN32_FIND_DATAW,
 }
 
+unsafe impl Send for OpenOptions {}
+unsafe impl Sync for OpenOptions {}
+
 #[derive(Clone, Debug)]
 pub struct OpenOptions {
     // generic
@@ -72,7 +75,7 @@ pub struct OpenOptions {
     attributes: c::DWORD,
     share_mode: c::DWORD,
     security_qos_flags: c::DWORD,
-    security_attributes: usize, // FIXME: should be a reference
+    security_attributes: c::LPSECURITY_ATTRIBUTES,
 }
 
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -187,7 +190,7 @@ impl OpenOptions {
             share_mode: c::FILE_SHARE_READ | c::FILE_SHARE_WRITE | c::FILE_SHARE_DELETE,
             attributes: 0,
             security_qos_flags: 0,
-            security_attributes: 0,
+            security_attributes: ptr::null_mut(),
         }
     }
 
@@ -228,7 +231,7 @@ impl OpenOptions {
         self.security_qos_flags = flags | c::SECURITY_SQOS_PRESENT;
     }
     pub fn security_attributes(&mut self, attrs: c::LPSECURITY_ATTRIBUTES) {
-        self.security_attributes = attrs as usize;
+        self.security_attributes = attrs;
     }
 
     fn get_access_mode(&self) -> io::Result<c::DWORD> {
@@ -289,7 +292,7 @@ impl File {
                 path.as_ptr(),
                 opts.get_access_mode()?,
                 opts.share_mode,
-                opts.security_attributes as *mut _,
+                opts.security_attributes,
                 opts.get_creation_mode()?,
                 opts.get_flags_and_attributes(),
                 ptr::null_mut(),
diff --git a/library/std/src/sys/windows/mod.rs b/library/std/src/sys/windows/mod.rs
index 62814eaaa56..87e3fec6353 100644
--- a/library/std/src/sys/windows/mod.rs
+++ b/library/std/src/sys/windows/mod.rs
@@ -136,7 +136,7 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
         ($($n:literal,)+) => {
             $(
                 if start[$n] == needle {
-                    return Some((&start[$n] as *const u16 as usize - ptr as usize) / 2);
+                    return Some(((&start[$n] as *const u16).addr() - ptr.addr()) / 2);
                 }
             )+
         }
@@ -149,7 +149,7 @@ pub fn unrolled_find_u16s(needle: u16, haystack: &[u16]) -> Option<usize> {
 
     for c in start {
         if *c == needle {
-            return Some((c as *const u16 as usize - ptr as usize) / 2);
+            return Some(((c as *const u16).addr() - ptr.addr()) / 2);
         }
     }
     None
diff --git a/library/std/src/sys/windows/os.rs b/library/std/src/sys/windows/os.rs
index 450bceae000..bcac996c024 100644
--- a/library/std/src/sys/windows/os.rs
+++ b/library/std/src/sys/windows/os.rs
@@ -134,7 +134,7 @@ impl Drop for Env {
 pub fn env() -> Env {
     unsafe {
         let ch = c::GetEnvironmentStringsW();
-        if ch as usize == 0 {
+        if ch.is_null() {
             panic!("failure getting env string from OS: {}", io::Error::last_os_error());
         }
         Env { base: ch, cur: ch }
diff --git a/library/std/src/sys/windows/thread_parker.rs b/library/std/src/sys/windows/thread_parker.rs
index 5888ee8e34b..3497da51dee 100644
--- a/library/std/src/sys/windows/thread_parker.rs
+++ b/library/std/src/sys/windows/thread_parker.rs
@@ -60,7 +60,7 @@ use crate::convert::TryFrom;
 use crate::ptr;
 use crate::sync::atomic::{
-    AtomicI8, AtomicUsize,
+    AtomicI8, AtomicPtr,
     Ordering::{Acquire, Relaxed, Release},
 };
 use crate::sys::{c, dur2timeout};
@@ -217,8 +217,8 @@ impl Parker {
 }
 
 fn keyed_event_handle() -> c::HANDLE {
-    const INVALID: usize = !0;
-    static HANDLE: AtomicUsize = AtomicUsize::new(INVALID);
+    const INVALID: c::HANDLE = ptr::invalid_mut(!0);
+    static HANDLE: AtomicPtr<libc::c_void> = AtomicPtr::new(INVALID);
     match HANDLE.load(Relaxed) {
         INVALID => {
             let mut handle = c::INVALID_HANDLE_VALUE;
@@ -233,7 +233,7 @@ fn keyed_event_handle() -> c::HANDLE {
                 r => panic!("Unable to create keyed event handle: error {r}"),
             }
         }
-            match HANDLE.compare_exchange(INVALID, handle as usize, Relaxed, Relaxed) {
+            match HANDLE.compare_exchange(INVALID, handle, Relaxed, Relaxed) {
                 Ok(_) => handle,
                 Err(h) => {
                     // Lost the race to another thread initializing HANDLE before we did.
@@ -241,10 +241,10 @@
                     unsafe {
                         c::CloseHandle(handle);
                     }
-                    h as c::HANDLE
+                    h
                 }
             }
         }
-        handle => handle as c::HANDLE,
+        handle => handle,
     }
 }
diff --git a/library/std/src/sys_common/condvar/check.rs b/library/std/src/sys_common/condvar/check.rs
index 47aff060d6f..7671850ac55 100644
--- a/library/std/src/sys_common/condvar/check.rs
+++ b/library/std/src/sys_common/condvar/check.rs
@@ -1,4 +1,5 @@
-use crate::sync::atomic::{AtomicUsize, Ordering};
+use crate::ptr;
+use crate::sync::atomic::{AtomicPtr, Ordering};
 use crate::sys::locks as imp;
 use crate::sys_common::mutex::MovableMutex;
 
@@ -13,17 +14,18 @@ impl CondvarCheck for Box<imp::Mutex> {
 }
 
 pub struct SameMutexCheck {
-    addr: AtomicUsize,
+    addr: AtomicPtr<()>,
 }
 
 #[allow(dead_code)]
 impl SameMutexCheck {
     pub const fn new() -> Self {
-        Self { addr: AtomicUsize::new(0) }
+        Self { addr: AtomicPtr::new(ptr::null_mut()) }
     }
     pub fn verify(&self, mutex: &MovableMutex) {
-        let addr = mutex.raw() as *const imp::Mutex as usize;
-        match self.addr.compare_exchange(0, addr, Ordering::SeqCst, Ordering::SeqCst) {
+        let addr = mutex.raw() as *const imp::Mutex as *const () as *mut _;
+        match self.addr.compare_exchange(ptr::null_mut(), addr, Ordering::SeqCst, Ordering::SeqCst)
+        {
             Ok(_) => {}               // Stored the address
             Err(n) if n == addr => {} // Lost a race to store the same address
             _ => panic!("attempted to use a condition variable with two mutexes"),
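The `SameMutexCheck` change above is the simplest instance of the `AtomicUsize` → `AtomicPtr` migration: an object's identity is checked by racing its address into an atomic, and with `AtomicPtr` no pointer/integer round trip is needed at all. A self-contained sketch of the same pattern (the `Check` type here is invented for illustration):

    use std::ptr;
    use std::sync::atomic::{AtomicPtr, Ordering};

    struct Check {
        addr: AtomicPtr<()>,
    }

    impl Check {
        // Returns true iff `obj` is the first (or same) object ever verified.
        fn verify(&self, obj: &u32) -> bool {
            let addr = obj as *const u32 as *const () as *mut ();
            match self.addr.compare_exchange(
                ptr::null_mut(), addr, Ordering::SeqCst, Ordering::SeqCst,
            ) {
                Ok(_) => true,       // stored the address
                Err(n) => n == addr, // false if a different object was stored first
            }
        }
    }

    fn main() {
        let c = Check { addr: AtomicPtr::new(ptr::null_mut()) };
        let x = 5u32;
        let y = 6u32;
        assert!(c.verify(&x));
        assert!(c.verify(&x));
        assert!(!c.verify(&y));
    }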
diff --git a/library/std/src/thread/local.rs b/library/std/src/thread/local.rs
index a100444f049..ca29261b1c9 100644
--- a/library/std/src/thread/local.rs
+++ b/library/std/src/thread/local.rs
@@ -1071,7 +1071,7 @@ pub mod os {
         pub unsafe fn get(&'static self, init: impl FnOnce() -> T) -> Option<&'static T> {
             // SAFETY: See the documentation for this method.
             let ptr = unsafe { self.os.get() as *mut Value<T> };
-            if ptr as usize > 1 {
+            if ptr.addr() > 1 {
                 // SAFETY: the check ensured the pointer is safe (its destructor
                 // is not running) + it is coming from a trusted source (self).
                 if let Some(ref value) = unsafe { (*ptr).inner.get() } {
@@ -1090,7 +1090,7 @@ pub mod os {
             // SAFETY: No mutable references are ever handed out meaning getting
             // the value is ok.
             let ptr = unsafe { self.os.get() as *mut Value<T> };
-            if ptr as usize == 1 {
+            if ptr.addr() == 1 {
                 // destructor is running
                 return None;
             }
@@ -1130,7 +1130,7 @@
             unsafe {
                 let ptr = Box::from_raw(ptr as *mut Value<T>);
                 let key = ptr.key;
-                key.os.set(1 as *mut u8);
+                key.os.set(ptr::invalid_mut(1));
                 drop(ptr);
                 key.os.set(ptr::null_mut());
             }
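The thread-local code above uses low addresses as sentinels: 0 means "no value set", 1 means "destructor currently running", and anything greater is a real `Value<T>` pointer, which is why the invalid-pointer constructor is the right tool for storing the `1`. A tiny sketch of that classification, with a stable cast standing in for `ptr.addr()`:

    // Hedged sketch of the sentinel scheme in thread/local.rs.
    fn classify(ptr: *mut u8) -> &'static str {
        match ptr as usize { // stand-in for `ptr.addr()`
            0 => "unset",
            1 => "destructor running", // stored via `ptr::invalid_mut(1)` in the diff
            _ => "live value",
        }
    }

    fn main() {
        assert_eq!(classify(std::ptr::null_mut()), "unset");
        assert_eq!(classify(1 as *mut u8), "destructor running");
    }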
