about summary refs log tree commit diff
diff options
context:
space:
mode:
author    Jubilee <46493976+workingjubilee@users.noreply.github.com>  2022-02-08 17:38:21 -0800
committer Jubilee Young <workingjubilee@gmail.com>  2022-02-08 17:43:26 -0800
commit   5d52455c65bf5a5eb258ed11591b8ebfa61ea5c7 (patch)
tree     e467414a84d310741b5c95ce8a217044f14cc18f
parent   e628a2991c47a771340cc5f8f06826c918f79609 (diff)
download rust-5d52455c65bf5a5eb258ed11591b8ebfa61ea5c7.tar.gz
         rust-5d52455c65bf5a5eb258ed11591b8ebfa61ea5c7.zip
Review for clarity and concision
Co-authored-by: Caleb Zulawski <caleb.zulawski@gmail.com>
-rw-r--r--  crates/core_simd/src/vector.rs      | 9
-rw-r--r--  crates/core_simd/tests/ops_macros.rs | 4
2 files changed, 6 insertions, 7 deletions
diff --git a/crates/core_simd/src/vector.rs b/crates/core_simd/src/vector.rs
index 0a2f681f66b..5bd8ed69535 100644
--- a/crates/core_simd/src/vector.rs
+++ b/crates/core_simd/src/vector.rs
@@ -13,10 +13,9 @@ use crate::simd::intrinsics;
 use crate::simd::{LaneCount, Mask, MaskElement, SupportedLaneCount};
 
 /// A SIMD vector of `LANES` elements of type `T`. `Simd<T, N>` has the same shape as [`[T; N]`](array), but operates like `T`.
-/// This type is commonly known by names like `f32x4` or `Vec4` in many programming languages.
 ///
-/// Two vectors of the same type and length will, by convention, support the binary operations (+, *, etc.) that `T` does.
-/// These take the lanes at each index on the left-hand side and right-hand side, perform the binary operation,
+/// Two vectors of the same type and length will, by convention, support the operators (+, *, etc.) that `T` does.
+/// These take the lanes at each index on the left-hand side and right-hand side, perform the operation,
 /// and return the result in the same lane in a vector of equal size. For a given operator, this is equivalent to zipping
 /// the two arrays together and mapping the operator over each lane.
 ///
@@ -29,14 +28,14 @@ use crate::simd::{LaneCount, Mask, MaskElement, SupportedLaneCount};
 /// let zm_mul = a0.zip(a1).map(|(lhs, rhs)| lhs * rhs);
 ///
 /// // `Simd<T, N>` implements `From<[T; N]>
-/// let [v0, v1] = [a0, a1].map(|a| Simd::from(a));
+/// let (v0, v1) = (Simd::from(a0), Simd::from(a1));
 /// // Which means arrays implement `Into<Simd<T, N>>`.
 /// assert_eq!(v0 + v1, zm_add.into());
 /// assert_eq!(v0 * v1, zm_mul.into());
 /// ```
 ///
 /// `Simd` with integers has the quirk that these operations are also inherently wrapping, as if `T` was [`Wrapping<T>`].
-/// Thus, `Simd` does not implement `wrapping_add`, because that is the behavior of the normal operation.
+/// Thus, `Simd` does not implement `wrapping_add`, because that is the default behavior.
 /// This means there is no warning on overflows, even in "debug" builds.
 /// For most applications where `Simd` is appropriate, it is "not a bug" to wrap,
 /// and even "debug builds" are unlikely to tolerate the loss of performance.
diff --git a/crates/core_simd/tests/ops_macros.rs b/crates/core_simd/tests/ops_macros.rs
index 9ba66fb8dd9..50f7a4ca170 100644
--- a/crates/core_simd/tests/ops_macros.rs
+++ b/crates/core_simd/tests/ops_macros.rs
@@ -213,13 +213,13 @@ macro_rules! impl_signed_tests {
                 fn div_min_may_overflow<const LANES: usize>() {
                     let a = Vector::<LANES>::splat(Scalar::MIN);
                     let b = Vector::<LANES>::splat(-1);
-                    assert_eq!(a / b, a / (b * b));
+                    assert_eq!(a / b, a);
                 }
 
                 fn rem_min_may_overflow<const LANES: usize>() {
                     let a = Vector::<LANES>::splat(Scalar::MIN);
                     let b = Vector::<LANES>::splat(-1);
-                    assert_eq!(a % b, a % (b * b));
+                    assert_eq!(a % b, Vector::<LANES>::splat(0));
                 }
 
             }