about summary refs log tree commit diff
diff options
context:
space:
mode:
authorCaleb Zulawski <caleb.zulawski@gmail.com>2022-10-29 17:51:36 -0400
committerGitHub <noreply@github.com>2022-10-29 17:51:36 -0400
commit87779aef0bae771658ebd31336d918e575a05730 (patch)
treec02f45aaf659af4710b1fd194b71018ee73b996f
parent7c80b6967ace2b669dc921e67d637c0d546318a9 (diff)
parent469c620bded61d265ef020b2442b1f639b2d8c10 (diff)
downloadrust-87779aef0bae771658ebd31336d918e575a05730.tar.gz
rust-87779aef0bae771658ebd31336d918e575a05730.zip
Merge pull request #287 from rust-lang/feature/pointer-vectors
Vectors of pointers
-rw-r--r--crates/core_simd/src/cast.rs55
-rw-r--r--crates/core_simd/src/elements.rs4
-rw-r--r--crates/core_simd/src/elements/const_ptr.rs139
-rw-r--r--crates/core_simd/src/elements/mut_ptr.rs134
-rw-r--r--crates/core_simd/src/eq.rs38
-rw-r--r--crates/core_simd/src/intrinsics.rs16
-rw-r--r--crates/core_simd/src/lib.rs4
-rw-r--r--crates/core_simd/src/mod.rs2
-rw-r--r--crates/core_simd/src/ord.rs102
-rw-r--r--crates/core_simd/src/vector.rs60
-rw-r--r--crates/core_simd/src/vector/ptr.rs51
-rw-r--r--crates/core_simd/tests/pointers.rs59
-rw-r--r--crates/test_helpers/src/biteq.rs20
-rw-r--r--crates/test_helpers/src/lib.rs63
14 files changed, 658 insertions, 89 deletions
diff --git a/crates/core_simd/src/cast.rs b/crates/core_simd/src/cast.rs
new file mode 100644
index 00000000000..65a3f845ffc
--- /dev/null
+++ b/crates/core_simd/src/cast.rs
@@ -0,0 +1,55 @@
+use crate::simd::SimdElement;
+
+/// Supporting trait for `Simd::cast`.  Typically doesn't need to be used directly.
+///
+/// # Safety
+/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast` or
+/// `simd_as` intrinsics.
+pub unsafe trait SimdCast: SimdElement {}
+
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i8 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i16 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i32 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for i64 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for isize {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u8 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u16 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u32 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for u64 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for usize {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for f32 {}
+// Safety: primitive number types can be cast to other primitive number types
+unsafe impl SimdCast for f64 {}
+
+/// Supporting trait for `Simd::cast_ptr`.  Typically doesn't need to be used directly.
+///
+/// # Safety
+/// Implementing this trait asserts that the type is a valid vector element for the `simd_cast_ptr`
+/// intrinsic.
+pub unsafe trait SimdCastPtr<T> {}
+
+// Safety: pointers can be cast to other pointer types
+unsafe impl<T, U> SimdCastPtr<T> for *const U
+where
+    U: core::ptr::Pointee,
+    T: core::ptr::Pointee<Metadata = U::Metadata>,
+{
+}
+// Safety: pointers can be cast to other pointer types
+unsafe impl<T, U> SimdCastPtr<T> for *mut U
+where
+    U: core::ptr::Pointee,
+    T: core::ptr::Pointee<Metadata = U::Metadata>,
+{
+}
diff --git a/crates/core_simd/src/elements.rs b/crates/core_simd/src/elements.rs
index 701eb66b248..dc7f52a4d57 100644
--- a/crates/core_simd/src/elements.rs
+++ b/crates/core_simd/src/elements.rs
@@ -1,11 +1,15 @@
+mod const_ptr;
 mod float;
 mod int;
+mod mut_ptr;
 mod uint;
 
 mod sealed {
     pub trait Sealed {}
 }
 
+pub use const_ptr::*;
 pub use float::*;
 pub use int::*;
+pub use mut_ptr::*;
 pub use uint::*;
diff --git a/crates/core_simd/src/elements/const_ptr.rs b/crates/core_simd/src/elements/const_ptr.rs
new file mode 100644
index 00000000000..f7227a56d58
--- /dev/null
+++ b/crates/core_simd/src/elements/const_ptr.rs
@@ -0,0 +1,139 @@
+use super::sealed::Sealed;
+use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
+
+/// Operations on SIMD vectors of constant pointers.
+pub trait SimdConstPtr: Copy + Sealed {
+    /// Vector of `usize` with the same number of lanes.
+    type Usize;
+
+    /// Vector of `isize` with the same number of lanes.
+    type Isize;
+
+    /// Vector of mutable pointers to the same type.
+    type MutPtr;
+
+    /// Mask type used for manipulating this SIMD vector type.
+    type Mask;
+
+    /// Returns `true` for each lane that is null.
+    fn is_null(self) -> Self::Mask;
+
+    /// Changes constness without changing the type.
+    fn as_mut(self) -> Self::MutPtr;
+
+    /// Gets the "address" portion of the pointer.
+    ///
+    /// This method discards pointer semantic metadata, so the result cannot be
+    /// directly cast into a valid pointer.
+    ///
+    /// This method semantically discards *provenance* and
+    /// *address-space* information. To properly restore that information, use [`Self::with_addr`].
+    ///
+    /// Equivalent to calling [`pointer::addr`] on each lane.
+    fn addr(self) -> Self::Usize;
+
+    /// Creates a new pointer with the given address.
+    ///
+    /// This performs the same operation as a cast, but copies the *address-space* and
+    /// *provenance* of `self` to the new pointer.
+    ///
+    /// Equivalent to calling [`pointer::with_addr`] on each lane.
+    fn with_addr(self, addr: Self::Usize) -> Self;
+
+    /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
+    /// in [`Self::from_exposed_addr`].
+    fn expose_addr(self) -> Self::Usize;
+
+    /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
+    ///
+    /// Equivalent to calling [`core::ptr::from_exposed_addr`] on each lane.
+    fn from_exposed_addr(addr: Self::Usize) -> Self;
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    ///
+    /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
+    fn wrapping_offset(self, offset: Self::Isize) -> Self;
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    ///
+    /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
+    fn wrapping_add(self, count: Self::Usize) -> Self;
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    ///
+    /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
+    fn wrapping_sub(self, count: Self::Usize) -> Self;
+}
+
+impl<T, const LANES: usize> Sealed for Simd<*const T, LANES> where
+    LaneCount<LANES>: SupportedLaneCount
+{
+}
+
+impl<T, const LANES: usize> SimdConstPtr for Simd<*const T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    type Usize = Simd<usize, LANES>;
+    type Isize = Simd<isize, LANES>;
+    type MutPtr = Simd<*mut T, LANES>;
+    type Mask = Mask<isize, LANES>;
+
+    #[inline]
+    fn is_null(self) -> Self::Mask {
+        Simd::splat(core::ptr::null()).simd_eq(self)
+    }
+
+    #[inline]
+    fn as_mut(self) -> Self::MutPtr {
+        self.cast_ptr()
+    }
+
+    #[inline]
+    fn addr(self) -> Self::Usize {
+        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
+        // provenance).
+        unsafe { core::mem::transmute_copy(&self) }
+    }
+
+    #[inline]
+    fn with_addr(self, addr: Self::Usize) -> Self {
+        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+        //
+        // In the mean-time, this operation is defined to be "as if" it was
+        // a wrapping_offset, so we can emulate it as such. This should properly
+        // restore pointer provenance even under today's compiler.
+        self.cast_ptr::<*const u8>()
+            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
+            .cast_ptr()
+    }
+
+    #[inline]
+    fn expose_addr(self) -> Self::Usize {
+        // Safety: `self` is a pointer vector
+        unsafe { intrinsics::simd_expose_addr(self) }
+    }
+
+    #[inline]
+    fn from_exposed_addr(addr: Self::Usize) -> Self {
+    // Safety: the returned vector is a pointer vector
+        unsafe { intrinsics::simd_from_exposed_addr(addr) }
+    }
+
+    #[inline]
+    fn wrapping_offset(self, count: Self::Isize) -> Self {
+        // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
+        unsafe { intrinsics::simd_arith_offset(self, count) }
+    }
+
+    #[inline]
+    fn wrapping_add(self, count: Self::Usize) -> Self {
+        self.wrapping_offset(count.cast())
+    }
+
+    #[inline]
+    fn wrapping_sub(self, count: Self::Usize) -> Self {
+        self.wrapping_offset(-count.cast::<isize>())
+    }
+}
diff --git a/crates/core_simd/src/elements/mut_ptr.rs b/crates/core_simd/src/elements/mut_ptr.rs
new file mode 100644
index 00000000000..e2fd438ef8f
--- /dev/null
+++ b/crates/core_simd/src/elements/mut_ptr.rs
@@ -0,0 +1,134 @@
+use super::sealed::Sealed;
+use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
+
+/// Operations on SIMD vectors of mutable pointers.
+pub trait SimdMutPtr: Copy + Sealed {
+    /// Vector of `usize` with the same number of lanes.
+    type Usize;
+
+    /// Vector of `isize` with the same number of lanes.
+    type Isize;
+
+    /// Vector of constant pointers to the same type.
+    type ConstPtr;
+
+    /// Mask type used for manipulating this SIMD vector type.
+    type Mask;
+
+    /// Returns `true` for each lane that is null.
+    fn is_null(self) -> Self::Mask;
+
+    /// Changes constness without changing the type.
+    fn as_const(self) -> Self::ConstPtr;
+
+    /// Gets the "address" portion of the pointer.
+    ///
+    /// This method discards pointer semantic metadata, so the result cannot be
+    /// directly cast into a valid pointer.
+    ///
+    /// Equivalent to calling [`pointer::addr`] on each lane.
+    fn addr(self) -> Self::Usize;
+
+    /// Creates a new pointer with the given address.
+    ///
+    /// This performs the same operation as a cast, but copies the *address-space* and
+    /// *provenance* of `self` to the new pointer.
+    ///
+    /// Equivalent to calling [`pointer::with_addr`] on each lane.
+    fn with_addr(self, addr: Self::Usize) -> Self;
+
+    /// Gets the "address" portion of the pointer, and "exposes" the provenance part for future use
+    /// in [`Self::from_exposed_addr`].
+    fn expose_addr(self) -> Self::Usize;
+
+    /// Convert an address back to a pointer, picking up a previously "exposed" provenance.
+    ///
+    /// Equivalent to calling [`core::ptr::from_exposed_addr_mut`] on each lane.
+    fn from_exposed_addr(addr: Self::Usize) -> Self;
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    ///
+    /// Equivalent to calling [`pointer::wrapping_offset`] on each lane.
+    fn wrapping_offset(self, offset: Self::Isize) -> Self;
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    ///
+    /// Equivalent to calling [`pointer::wrapping_add`] on each lane.
+    fn wrapping_add(self, count: Self::Usize) -> Self;
+
+    /// Calculates the offset from a pointer using wrapping arithmetic.
+    ///
+    /// Equivalent to calling [`pointer::wrapping_sub`] on each lane.
+    fn wrapping_sub(self, count: Self::Usize) -> Self;
+}
+
+impl<T, const LANES: usize> Sealed for Simd<*mut T, LANES> where LaneCount<LANES>: SupportedLaneCount
+{}
+
+impl<T, const LANES: usize> SimdMutPtr for Simd<*mut T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    type Usize = Simd<usize, LANES>;
+    type Isize = Simd<isize, LANES>;
+    type ConstPtr = Simd<*const T, LANES>;
+    type Mask = Mask<isize, LANES>;
+
+    #[inline]
+    fn is_null(self) -> Self::Mask {
+        Simd::splat(core::ptr::null_mut()).simd_eq(self)
+    }
+
+    #[inline]
+    fn as_const(self) -> Self::ConstPtr {
+        self.cast_ptr()
+    }
+
+    #[inline]
+    fn addr(self) -> Self::Usize {
+        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+        // SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
+        // provenance).
+        unsafe { core::mem::transmute_copy(&self) }
+    }
+
+    #[inline]
+    fn with_addr(self, addr: Self::Usize) -> Self {
+        // FIXME(strict_provenance_magic): I am magic and should be a compiler intrinsic.
+        //
+        // In the mean-time, this operation is defined to be "as if" it was
+        // a wrapping_offset, so we can emulate it as such. This should properly
+        // restore pointer provenance even under today's compiler.
+        self.cast_ptr::<*mut u8>()
+            .wrapping_offset(addr.cast::<isize>() - self.addr().cast::<isize>())
+            .cast_ptr()
+    }
+
+    #[inline]
+    fn expose_addr(self) -> Self::Usize {
+        // Safety: `self` is a pointer vector
+        unsafe { intrinsics::simd_expose_addr(self) }
+    }
+
+    #[inline]
+    fn from_exposed_addr(addr: Self::Usize) -> Self {
+    // Safety: the returned vector is a pointer vector
+        unsafe { intrinsics::simd_from_exposed_addr(addr) }
+    }
+
+    #[inline]
+    fn wrapping_offset(self, count: Self::Isize) -> Self {
+        // Safety: simd_arith_offset takes a vector of pointers and a vector of offsets
+        unsafe { intrinsics::simd_arith_offset(self, count) }
+    }
+
+    #[inline]
+    fn wrapping_add(self, count: Self::Usize) -> Self {
+        self.wrapping_offset(count.cast())
+    }
+
+    #[inline]
+    fn wrapping_sub(self, count: Self::Usize) -> Self {
+        self.wrapping_offset(-count.cast::<isize>())
+    }
+}
diff --git a/crates/core_simd/src/eq.rs b/crates/core_simd/src/eq.rs
index c7111f720a8..80763c07272 100644
--- a/crates/core_simd/src/eq.rs
+++ b/crates/core_simd/src/eq.rs
@@ -1,4 +1,6 @@
-use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdElement, SupportedLaneCount};
+use crate::simd::{
+    intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdElement, SimdMutPtr, SupportedLaneCount,
+};
 
 /// Parallel `PartialEq`.
 pub trait SimdPartialEq {
@@ -71,3 +73,37 @@ macro_rules! impl_mask {
 }
 
 impl_mask! { i8, i16, i32, i64, isize }
+
+impl<T, const LANES: usize> SimdPartialEq for Simd<*const T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    type Mask = Mask<isize, LANES>;
+
+    #[inline]
+    fn simd_eq(self, other: Self) -> Self::Mask {
+        self.addr().simd_eq(other.addr())
+    }
+
+    #[inline]
+    fn simd_ne(self, other: Self) -> Self::Mask {
+        self.addr().simd_ne(other.addr())
+    }
+}
+
+impl<T, const LANES: usize> SimdPartialEq for Simd<*mut T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    type Mask = Mask<isize, LANES>;
+
+    #[inline]
+    fn simd_eq(self, other: Self) -> Self::Mask {
+        self.addr().simd_eq(other.addr())
+    }
+
+    #[inline]
+    fn simd_ne(self, other: Self) -> Self::Mask {
+        self.addr().simd_ne(other.addr())
+    }
+}
diff --git a/crates/core_simd/src/intrinsics.rs b/crates/core_simd/src/intrinsics.rs
index 6047890a093..d5466822b93 100644
--- a/crates/core_simd/src/intrinsics.rs
+++ b/crates/core_simd/src/intrinsics.rs
@@ -61,9 +61,6 @@ extern "platform-intrinsic" {
     /// xor
     pub(crate) fn simd_xor<T>(x: T, y: T) -> T;
 
-    /// getelementptr (without inbounds)
-    pub(crate) fn simd_arith_offset<T, U>(ptrs: T, offsets: U) -> T;
-
     /// fptoui/fptosi/uitofp/sitofp
     /// casting floats to integers is truncating, so it is safe to convert values like e.g. 1.5
     /// but the truncated value must fit in the target type or the result is poison.
@@ -151,4 +148,17 @@ extern "platform-intrinsic" {
     pub(crate) fn simd_select<M, T>(m: M, yes: T, no: T) -> T;
     #[allow(unused)]
     pub(crate) fn simd_select_bitmask<M, T>(m: M, yes: T, no: T) -> T;
+
+    /// getelementptr (without inbounds)
+    /// equivalent to wrapping_offset
+    pub(crate) fn simd_arith_offset<T, U>(ptr: T, offset: U) -> T;
+
+    /// equivalent to `T as U` semantics, specifically for pointers
+    pub(crate) fn simd_cast_ptr<T, U>(ptr: T) -> U;
+
+    /// expose a pointer as an address
+    pub(crate) fn simd_expose_addr<T, U>(ptr: T) -> U;
+
+    /// convert an exposed address back to a pointer
+    pub(crate) fn simd_from_exposed_addr<T, U>(addr: T) -> U;
 }
diff --git a/crates/core_simd/src/lib.rs b/crates/core_simd/src/lib.rs
index 715f258f617..82873162969 100644
--- a/crates/core_simd/src/lib.rs
+++ b/crates/core_simd/src/lib.rs
@@ -7,7 +7,9 @@
     repr_simd,
     simd_ffi,
     staged_api,
-    stdsimd
+    stdsimd,
+    strict_provenance,
+    ptr_metadata
 )]
 #![cfg_attr(feature = "generic_const_exprs", feature(generic_const_exprs))]
 #![cfg_attr(feature = "generic_const_exprs", allow(incomplete_features))]
diff --git a/crates/core_simd/src/mod.rs b/crates/core_simd/src/mod.rs
index 9909d639874..ece026a448b 100644
--- a/crates/core_simd/src/mod.rs
+++ b/crates/core_simd/src/mod.rs
@@ -7,6 +7,7 @@ pub(crate) mod intrinsics;
 mod to_bytes;
 
 mod alias;
+mod cast;
 mod elements;
 mod eq;
 mod fmt;
@@ -24,6 +25,7 @@ pub mod simd {
     pub(crate) use crate::core_simd::intrinsics;
 
     pub use crate::core_simd::alias::*;
+    pub use crate::core_simd::cast::*;
     pub use crate::core_simd::elements::*;
     pub use crate::core_simd::eq::*;
     pub use crate::core_simd::lane_count::{LaneCount, SupportedLaneCount};
diff --git a/crates/core_simd/src/ord.rs b/crates/core_simd/src/ord.rs
index 9a87bc2e344..1ae9cd061fb 100644
--- a/crates/core_simd/src/ord.rs
+++ b/crates/core_simd/src/ord.rs
@@ -1,4 +1,6 @@
-use crate::simd::{intrinsics, LaneCount, Mask, Simd, SimdPartialEq, SupportedLaneCount};
+use crate::simd::{
+    intrinsics, LaneCount, Mask, Simd, SimdConstPtr, SimdMutPtr, SimdPartialEq, SupportedLaneCount,
+};
 
 /// Parallel `PartialOrd`.
 pub trait SimdPartialOrd: SimdPartialEq {
@@ -211,3 +213,101 @@ macro_rules! impl_mask {
 }
 
 impl_mask! { i8, i16, i32, i64, isize }
+
+impl<T, const LANES: usize> SimdPartialOrd for Simd<*const T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    #[inline]
+    fn simd_lt(self, other: Self) -> Self::Mask {
+        self.addr().simd_lt(other.addr())
+    }
+
+    #[inline]
+    fn simd_le(self, other: Self) -> Self::Mask {
+        self.addr().simd_le(other.addr())
+    }
+
+    #[inline]
+    fn simd_gt(self, other: Self) -> Self::Mask {
+        self.addr().simd_gt(other.addr())
+    }
+
+    #[inline]
+    fn simd_ge(self, other: Self) -> Self::Mask {
+        self.addr().simd_ge(other.addr())
+    }
+}
+
+impl<T, const LANES: usize> SimdOrd for Simd<*const T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    #[inline]
+    fn simd_max(self, other: Self) -> Self {
+        self.simd_lt(other).select(other, self)
+    }
+
+    #[inline]
+    fn simd_min(self, other: Self) -> Self {
+        self.simd_gt(other).select(other, self)
+    }
+
+    #[inline]
+    fn simd_clamp(self, min: Self, max: Self) -> Self {
+        assert!(
+            min.simd_le(max).all(),
+            "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+        );
+        self.simd_max(min).simd_min(max)
+    }
+}
+
+impl<T, const LANES: usize> SimdPartialOrd for Simd<*mut T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    #[inline]
+    fn simd_lt(self, other: Self) -> Self::Mask {
+        self.addr().simd_lt(other.addr())
+    }
+
+    #[inline]
+    fn simd_le(self, other: Self) -> Self::Mask {
+        self.addr().simd_le(other.addr())
+    }
+
+    #[inline]
+    fn simd_gt(self, other: Self) -> Self::Mask {
+        self.addr().simd_gt(other.addr())
+    }
+
+    #[inline]
+    fn simd_ge(self, other: Self) -> Self::Mask {
+        self.addr().simd_ge(other.addr())
+    }
+}
+
+impl<T, const LANES: usize> SimdOrd for Simd<*mut T, LANES>
+where
+    LaneCount<LANES>: SupportedLaneCount,
+{
+    #[inline]
+    fn simd_max(self, other: Self) -> Self {
+        self.simd_lt(other).select(other, self)
+    }
+
+    #[inline]
+    fn simd_min(self, other: Self) -> Self {
+        self.simd_gt(other).select(other, self)
+    }
+
+    #[inline]
+    fn simd_clamp(self, min: Self, max: Self) -> Self {
+        assert!(
+            min.simd_le(max).all(),
+            "each lane in `min` must be less than or equal to the corresponding lane in `max`",
+        );
+        self.simd_max(min).simd_min(max)
+    }
+}
diff --git a/crates/core_simd/src/vector.rs b/crates/core_simd/src/vector.rs
index 7f0e8350cf8..c5d68f1b921 100644
--- a/crates/core_simd/src/vector.rs
+++ b/crates/core_simd/src/vector.rs
@@ -1,8 +1,6 @@
-// Vectors of pointers are not for public use at the current time.
-pub(crate) mod ptr;
-
 use crate::simd::{
-    intrinsics, LaneCount, Mask, MaskElement, SimdPartialOrd, SupportedLaneCount, Swizzle,
+    intrinsics, LaneCount, Mask, MaskElement, SimdCast, SimdCastPtr, SimdConstPtr, SimdMutPtr,
+    SimdPartialOrd, SupportedLaneCount, Swizzle,
 };
 
 /// A SIMD vector of `LANES` elements of type `T`. `Simd<T, N>` has the same shape as [`[T; N]`](array), but operates like `T`.
@@ -211,11 +209,26 @@ where
     #[must_use]
     #[inline]
     #[cfg(not(bootstrap))]
-    pub fn cast<U: SimdElement>(self) -> Simd<U, LANES> {
-        // Safety: The input argument is a vector of a valid SIMD element type.
+    pub fn cast<U: SimdCast>(self) -> Simd<U, LANES>
+    where
+        T: SimdCast,
+    {
+        // Safety: supported types are guaranteed by SimdCast
         unsafe { intrinsics::simd_as(self) }
     }
 
+    /// Lanewise casts pointers to another pointer type.
+    #[must_use]
+    #[inline]
+    pub fn cast_ptr<U>(self) -> Simd<U, LANES>
+    where
+        T: SimdCastPtr<U>,
+        U: SimdElement,
+    {
+        // Safety: supported types are guaranteed by SimdCastPtr
+        unsafe { intrinsics::simd_cast_ptr(self) }
+    }
+
     /// Rounds toward zero and converts to the same-width integer type, assuming that
     /// the value is finite and fits in that type.
     ///
@@ -234,11 +247,10 @@ where
     #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
     pub unsafe fn to_int_unchecked<I>(self) -> Simd<I, LANES>
     where
-        T: core::convert::FloatToInt<I>,
-        I: SimdElement,
+        T: core::convert::FloatToInt<I> + SimdCast,
+        I: SimdCast,
     {
-        // Safety: `self` is a vector, and `FloatToInt` ensures the type can be casted to
-        // an integer.
+        // Safety: supported types are guaranteed by SimdCast, the caller is responsible for the extra invariants
         unsafe { intrinsics::simd_cast(self) }
     }
 
@@ -349,7 +361,7 @@ where
         idxs: Simd<usize, LANES>,
         or: Self,
     ) -> Self {
-        let base_ptr = crate::simd::ptr::SimdConstPtr::splat(slice.as_ptr());
+        let base_ptr = Simd::<*const T, LANES>::splat(slice.as_ptr());
         // Ferris forgive me, I have done pointer arithmetic here.
         let ptrs = base_ptr.wrapping_add(idxs);
         // Safety: The ptrs have been bounds-masked to prevent memory-unsafe reads insha'allah
@@ -457,7 +469,7 @@ where
         // 3. &mut [T] which will become our base ptr.
         unsafe {
             // Now Entering ☢️ *mut T Zone
-            let base_ptr = crate::simd::ptr::SimdMutPtr::splat(slice.as_mut_ptr());
+            let base_ptr = Simd::<*mut T, LANES>::splat(slice.as_mut_ptr());
             // Ferris forgive me, I have done pointer arithmetic here.
             let ptrs = base_ptr.wrapping_add(idxs);
             // The ptrs have been bounds-masked to prevent memory-unsafe writes insha'allah
@@ -739,3 +751,27 @@ impl Sealed for f64 {}
 unsafe impl SimdElement for f64 {
     type Mask = i64;
 }
+
+impl<T> Sealed for *const T {}
+
+// Safety: (thin) const pointers are valid SIMD element types, and are supported by this API
+//
+// Fat pointers may be supported in the future.
+unsafe impl<T> SimdElement for *const T
+where
+    T: core::ptr::Pointee<Metadata = ()>,
+{
+    type Mask = isize;
+}
+
+impl<T> Sealed for *mut T {}
+
+// Safety: (thin) mut pointers are valid SIMD element types, and are supported by this API
+//
+// Fat pointers may be supported in the future.
+unsafe impl<T> SimdElement for *mut T
+where
+    T: core::ptr::Pointee<Metadata = ()>,
+{
+    type Mask = isize;
+}
diff --git a/crates/core_simd/src/vector/ptr.rs b/crates/core_simd/src/vector/ptr.rs
deleted file mode 100644
index fa756344db9..00000000000
--- a/crates/core_simd/src/vector/ptr.rs
+++ /dev/null
@@ -1,51 +0,0 @@
-//! Private implementation details of public gather/scatter APIs.
-use crate::simd::intrinsics;
-use crate::simd::{LaneCount, Simd, SupportedLaneCount};
-
-/// A vector of *const T.
-#[derive(Debug, Copy, Clone)]
-#[repr(simd)]
-pub(crate) struct SimdConstPtr<T, const LANES: usize>([*const T; LANES]);
-
-impl<T, const LANES: usize> SimdConstPtr<T, LANES>
-where
-    LaneCount<LANES>: SupportedLaneCount,
-    T: Sized,
-{
-    #[inline]
-    #[must_use]
-    pub fn splat(ptr: *const T) -> Self {
-        Self([ptr; LANES])
-    }
-
-    #[inline]
-    #[must_use]
-    pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
-        // Safety: this intrinsic doesn't have a precondition
-        unsafe { intrinsics::simd_arith_offset(self, addend) }
-    }
-}
-
-/// A vector of *mut T. Be very careful around potential aliasing.
-#[derive(Debug, Copy, Clone)]
-#[repr(simd)]
-pub(crate) struct SimdMutPtr<T, const LANES: usize>([*mut T; LANES]);
-
-impl<T, const LANES: usize> SimdMutPtr<T, LANES>
-where
-    LaneCount<LANES>: SupportedLaneCount,
-    T: Sized,
-{
-    #[inline]
-    #[must_use]
-    pub fn splat(ptr: *mut T) -> Self {
-        Self([ptr; LANES])
-    }
-
-    #[inline]
-    #[must_use]
-    pub fn wrapping_add(self, addend: Simd<usize, LANES>) -> Self {
-        // Safety: this intrinsic doesn't have a precondition
-        unsafe { intrinsics::simd_arith_offset(self, addend) }
-    }
-}
diff --git a/crates/core_simd/tests/pointers.rs b/crates/core_simd/tests/pointers.rs
new file mode 100644
index 00000000000..8eb0bd84042
--- /dev/null
+++ b/crates/core_simd/tests/pointers.rs
@@ -0,0 +1,59 @@
+#![feature(portable_simd, strict_provenance)]
+
+use core_simd::{Simd, SimdConstPtr, SimdMutPtr};
+
+macro_rules! common_tests {
+    { $constness:ident } => {
+        test_helpers::test_lanes! {
+            fn is_null<const LANES: usize>() {
+                test_helpers::test_unary_mask_elementwise(
+                    &Simd::<*$constness u32, LANES>::is_null,
+                    &<*$constness u32>::is_null,
+                    &|_| true,
+                );
+            }
+
+            fn addr<const LANES: usize>() {
+                test_helpers::test_unary_elementwise(
+                    &Simd::<*$constness u32, LANES>::addr,
+                    &<*$constness u32>::addr,
+                    &|_| true,
+                );
+            }
+
+            fn wrapping_offset<const LANES: usize>() {
+                test_helpers::test_binary_elementwise(
+                    &Simd::<*$constness u32, LANES>::wrapping_offset,
+                    &<*$constness u32>::wrapping_offset,
+                    &|_, _| true,
+                );
+            }
+
+            fn wrapping_add<const LANES: usize>() {
+                test_helpers::test_binary_elementwise(
+                    &Simd::<*$constness u32, LANES>::wrapping_add,
+                    &<*$constness u32>::wrapping_add,
+                    &|_, _| true,
+                );
+            }
+
+            fn wrapping_sub<const LANES: usize>() {
+                test_helpers::test_binary_elementwise(
+                    &Simd::<*$constness u32, LANES>::wrapping_sub,
+                    &<*$constness u32>::wrapping_sub,
+                    &|_, _| true,
+                );
+            }
+        }
+    }
+}
+
+mod const_ptr {
+    use super::*;
+    common_tests! { const }
+}
+
+mod mut_ptr {
+    use super::*;
+    common_tests! { mut }
+}
diff --git a/crates/test_helpers/src/biteq.rs b/crates/test_helpers/src/biteq.rs
index 00350e22418..7d91260d838 100644
--- a/crates/test_helpers/src/biteq.rs
+++ b/crates/test_helpers/src/biteq.rs
@@ -55,6 +55,26 @@ macro_rules! impl_float_biteq {
 
 impl_float_biteq! { f32, f64 }
 
+impl<T> BitEq for *const T {
+    fn biteq(&self, other: &Self) -> bool {
+        self == other
+    }
+
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
+impl<T> BitEq for *mut T {
+    fn biteq(&self, other: &Self) -> bool {
+        self == other
+    }
+
+    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
+        write!(f, "{:?}", self)
+    }
+}
+
 impl<T: BitEq, const N: usize> BitEq for [T; N] {
     fn biteq(&self, other: &Self) -> bool {
         self.iter()
diff --git a/crates/test_helpers/src/lib.rs b/crates/test_helpers/src/lib.rs
index 650eadd12bf..5f2a928b5e4 100644
--- a/crates/test_helpers/src/lib.rs
+++ b/crates/test_helpers/src/lib.rs
@@ -38,6 +38,28 @@ impl_num! { usize }
 impl_num! { f32 }
 impl_num! { f64 }
 
+impl<T> DefaultStrategy for *const T {
+    type Strategy = proptest::strategy::Map<proptest::num::isize::Any, fn(isize) -> *const T>;
+    fn default_strategy() -> Self::Strategy {
+        fn map<T>(x: isize) -> *const T {
+            x as _
+        }
+        use proptest::strategy::Strategy;
+        proptest::num::isize::ANY.prop_map(map)
+    }
+}
+
+impl<T> DefaultStrategy for *mut T {
+    type Strategy = proptest::strategy::Map<proptest::num::isize::Any, fn(isize) -> *mut T>;
+    fn default_strategy() -> Self::Strategy {
+        fn map<T>(x: isize) -> *mut T {
+            x as _
+        }
+        use proptest::strategy::Strategy;
+        proptest::num::isize::ANY.prop_map(map)
+    }
+}
+
 #[cfg(not(target_arch = "wasm32"))]
 impl DefaultStrategy for u128 {
     type Strategy = proptest::num::u128::Any;
@@ -135,21 +157,21 @@ pub fn test_unary_elementwise<Scalar, ScalarResult, Vector, VectorResult, const
     fs: &dyn Fn(Scalar) -> ScalarResult,
     check: &dyn Fn([Scalar; LANES]) -> bool,
 ) where
-    Scalar: Copy + Default + core::fmt::Debug + DefaultStrategy,
-    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
+    Scalar: Copy + core::fmt::Debug + DefaultStrategy,
+    ScalarResult: Copy + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
     Vector: Into<[Scalar; LANES]> + From<[Scalar; LANES]> + Copy,
     VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
 {
     test_1(&|x: [Scalar; LANES]| {
         proptest::prop_assume!(check(x));
         let result_1: [ScalarResult; LANES] = fv(x.into()).into();
-        let result_2: [ScalarResult; LANES] = {
-            let mut result = [ScalarResult::default(); LANES];
-            for (i, o) in x.iter().zip(result.iter_mut()) {
-                *o = fs(*i);
-            }
-            result
-        };
+        let result_2: [ScalarResult; LANES] = x
+            .iter()
+            .copied()
+            .map(fs)
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap();
         crate::prop_assert_biteq!(result_1, result_2);
         Ok(())
     });
@@ -162,7 +184,7 @@ pub fn test_unary_mask_elementwise<Scalar, Vector, Mask, const LANES: usize>(
     fs: &dyn Fn(Scalar) -> bool,
     check: &dyn Fn([Scalar; LANES]) -> bool,
 ) where
-    Scalar: Copy + Default + core::fmt::Debug + DefaultStrategy,
+    Scalar: Copy + core::fmt::Debug + DefaultStrategy,
     Vector: Into<[Scalar; LANES]> + From<[Scalar; LANES]> + Copy,
     Mask: Into<[bool; LANES]> + From<[bool; LANES]> + Copy,
 {
@@ -196,9 +218,9 @@ pub fn test_binary_elementwise<
     fs: &dyn Fn(Scalar1, Scalar2) -> ScalarResult,
     check: &dyn Fn([Scalar1; LANES], [Scalar2; LANES]) -> bool,
 ) where
-    Scalar1: Copy + Default + core::fmt::Debug + DefaultStrategy,
-    Scalar2: Copy + Default + core::fmt::Debug + DefaultStrategy,
-    ScalarResult: Copy + Default + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
+    Scalar1: Copy + core::fmt::Debug + DefaultStrategy,
+    Scalar2: Copy + core::fmt::Debug + DefaultStrategy,
+    ScalarResult: Copy + biteq::BitEq + core::fmt::Debug + DefaultStrategy,
     Vector1: Into<[Scalar1; LANES]> + From<[Scalar1; LANES]> + Copy,
     Vector2: Into<[Scalar2; LANES]> + From<[Scalar2; LANES]> + Copy,
     VectorResult: Into<[ScalarResult; LANES]> + From<[ScalarResult; LANES]> + Copy,
@@ -206,13 +228,14 @@ pub fn test_binary_elementwise<
     test_2(&|x: [Scalar1; LANES], y: [Scalar2; LANES]| {
         proptest::prop_assume!(check(x, y));
         let result_1: [ScalarResult; LANES] = fv(x.into(), y.into()).into();
-        let result_2: [ScalarResult; LANES] = {
-            let mut result = [ScalarResult::default(); LANES];
-            for ((i1, i2), o) in x.iter().zip(y.iter()).zip(result.iter_mut()) {
-                *o = fs(*i1, *i2);
-            }
-            result
-        };
+        let result_2: [ScalarResult; LANES] = x
+            .iter()
+            .copied()
+            .zip(y.iter().copied())
+            .map(|(x, y)| fs(x, y))
+            .collect::<Vec<_>>()
+            .try_into()
+            .unwrap();
         crate::prop_assert_biteq!(result_1, result_2);
         Ok(())
     });