| author | Scott McMurray <scottmcm@users.noreply.github.com> | 2024-04-17 23:55:35 -0700 |
|---|---|---|
| committer | Scott McMurray <scottmcm@users.noreply.github.com> | 2024-04-18 18:11:21 -0700 |
| commit | 986d9f104b2da25f7388ad824b9868c3ce1e5f21 | |
| tree | 7ac1d3527e66ce610f6a69835bc5369f8334ff14 | |
| parent | e3181b091e88321f5ea149afed6db0edf0a4f37b | |
Make `checked` ops emit *unchecked* LLVM operations where feasible
For things with easily pre-checked overflow conditions -- shifts and unsigned subtraction -- write the checked methods in such a way that we stop emitting wrapping versions of them.

For example, today <https://rust.godbolt.org/z/qM9YK8Txb> neither

```rust
a.checked_sub(b).unwrap()
```

nor

```rust
a.checked_sub(b).unwrap_unchecked()
```

actually optimizes to `sub nuw`. After this PR they do.
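As a concrete illustration of that claim (a hypothetical caller, not code from this commit), a wrapper like the one below is the kind of pattern that should now lower to a plain `sub nuw`:

```rust
// Hypothetical caller for illustration only; not part of this commit.
// Per the commit message, `checked_sub` now pre-checks its operands and
// performs an unchecked subtraction internally, so LLVM can see that the
// `None` arm is impossible here and lower the whole body to `sub nuw`.
//
// Safety: the caller must guarantee `a >= b`, so `checked_sub` returns `Some`.
pub unsafe fn sub_known_in_range(a: u16, b: u16) -> u16 {
    // SAFETY: `a >= b` per the caller's contract, so the `None` arm is unreachable.
    unsafe { a.checked_sub(b).unwrap_unchecked() }
}
```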
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | library/core/src/num/int_macros.rs | 22 |
| -rw-r--r-- | library/core/src/num/uint_macros.rs | 35 |
| -rw-r--r-- | tests/codegen/checked_math.rs | 86 |
| -rw-r--r-- | tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-abort.mir | 53 |
| -rw-r--r-- | tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir | 53 |
| -rw-r--r-- | tests/mir-opt/pre-codegen/checked_ops.rs | 2 |
6 files changed, 160 insertions, 91 deletions
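Every library change below follows the same shape: check the overflow condition up front, then do the operation with an unchecked primitive inside `unsafe`. A stand-alone sketch of that shape (illustrative only; the real methods are macro-generated and appear in the diff):

```rust
// Stand-alone sketch of the `checked_sub` pattern from uint_macros.rs below.
// `checked_sub_sketch` is a hypothetical free function for illustration; the
// real method calls `intrinsics::unchecked_sub`, while this uses the public
// `u32::unchecked_sub` (stable on recent toolchains).
pub fn checked_sub_sketch(a: u32, b: u32) -> Option<u32> {
    if a >= b {
        // SAFETY: just checked `a >= b`, so the subtraction cannot overflow.
        Some(unsafe { a.unchecked_sub(b) })
    } else {
        None
    }
}
```

The shift variants use the same shape, with `rhs < Self::BITS` as the pre-check instead of `self >= rhs`.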
```diff
diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs
index e34e9b7fff6..b6aab9be358 100644
--- a/library/core/src/num/int_macros.rs
+++ b/library/core/src/num/int_macros.rs
@@ -1163,12 +1163,19 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "wrapping", since = "1.7.0")]
         #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+        // We could always go back to wrapping
+        #[rustc_allow_const_fn_unstable(unchecked_shifts)]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
         pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
-            let (a, b) = self.overflowing_shl(rhs);
-            if unlikely!(b) { None } else { Some(a) }
+            // Not using overflowing_shl as that's a wrapping shift
+            if rhs < Self::BITS {
+                // SAFETY: just checked the RHS is in-range
+                Some(unsafe { self.unchecked_shl(rhs) })
+            } else {
+                None
+            }
         }
 
         /// Strict shift left. Computes `self << rhs`, panicking if `rhs` is larger
@@ -1254,12 +1261,19 @@ macro_rules! int_impl {
         /// ```
         #[stable(feature = "wrapping", since = "1.7.0")]
         #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+        // We could always go back to wrapping
+        #[rustc_allow_const_fn_unstable(unchecked_shifts)]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
         pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
-            let (a, b) = self.overflowing_shr(rhs);
-            if unlikely!(b) { None } else { Some(a) }
+            // Not using overflowing_shr as that's a wrapping shift
+            if rhs < Self::BITS {
+                // SAFETY: just checked the RHS is in-range
+                Some(unsafe { self.unchecked_shr(rhs) })
+            } else {
+                None
+            }
         }
 
         /// Strict shift right. Computes `self >> rhs`, panicking `rhs` is
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs
index ba6a243041c..58f713baa57 100644
--- a/library/core/src/num/uint_macros.rs
+++ b/library/core/src/num/uint_macros.rs
@@ -579,8 +579,17 @@ macro_rules! uint_impl {
                       without modifying the original"]
         #[inline]
         pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
-            let (a, b) = self.overflowing_sub(rhs);
-            if unlikely!(b) { None } else { Some(a) }
+            // Per PR#103299, there's no advantage to the `overflowing` intrinsic
+            // for *unsigned* subtraction and we just emit the manual check anyway.
+            // Thus, rather than using `overflowing_sub` that produces a wrapping
+            // subtraction, check it ourself so we can use an unchecked one.
+
+            if self >= rhs {
+                // SAFETY: just checked this can't overflow
+                Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
+            } else {
+                None
+            }
         }
 
         /// Strict integer subtraction. Computes `self - rhs`, panicking if
@@ -1222,12 +1231,19 @@ macro_rules! uint_impl {
         /// ```
         #[stable(feature = "wrapping", since = "1.7.0")]
         #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+        // We could always go back to wrapping
+        #[rustc_allow_const_fn_unstable(unchecked_shifts)]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
         pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
-            let (a, b) = self.overflowing_shl(rhs);
-            if unlikely!(b) { None } else { Some(a) }
+            // Not using overflowing_shl as that's a wrapping shift
+            if rhs < Self::BITS {
+                // SAFETY: just checked the RHS is in-range
+                Some(unsafe { self.unchecked_shl(rhs) })
+            } else {
+                None
+            }
         }
 
         /// Strict shift left. Computes `self << rhs`, panicking if `rhs` is larger
@@ -1313,12 +1329,19 @@ macro_rules! uint_impl {
         /// ```
         #[stable(feature = "wrapping", since = "1.7.0")]
         #[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
+        // We could always go back to wrapping
+        #[rustc_allow_const_fn_unstable(unchecked_shifts)]
         #[must_use = "this returns the result of the operation, \
                       without modifying the original"]
         #[inline]
         pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
-            let (a, b) = self.overflowing_shr(rhs);
-            if unlikely!(b) { None } else { Some(a) }
+            // Not using overflowing_shr as that's a wrapping shift
+            if rhs < Self::BITS {
+                // SAFETY: just checked the RHS is in-range
+                Some(unsafe { self.unchecked_shr(rhs) })
+            } else {
+                None
+            }
         }
 
         /// Strict shift right. Computes `self >> rhs`, panicking `rhs` is
diff --git a/tests/codegen/checked_math.rs b/tests/codegen/checked_math.rs
new file mode 100644
index 00000000000..41016e3b7be
--- /dev/null
+++ b/tests/codegen/checked_math.rs
@@ -0,0 +1,86 @@
+//@ compile-flags: -O -Z merge-functions=disabled
+
+#![crate_type = "lib"]
+#![feature(unchecked_shifts)]
+
+// Because the result of something like `u32::checked_sub` can only be used if it
+// didn't overflow, make sure that LLVM actually knows that in optimized builds.
+// Thanks to poison semantics, this doesn't even need branches.
+
+// CHECK-LABEL: @checked_sub_unsigned
+// CHECK-SAME: (i16 noundef %a, i16 noundef %b)
+#[no_mangle]
+pub fn checked_sub_unsigned(a: u16, b: u16) -> Option<u16> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp uge i16 %a, %b
+    // CHECK-DAG: %[[DIFF_P:.+]] = sub nuw i16 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i16
+    // CHECK-DAG: %[[DIFF_U:.+]] = select i1 %[[IS_SOME]], i16 %[[DIFF_P]], i16 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i16, i16 } poison, i16 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i16, i16 } %[[R0]], i16 %[[DIFF_U]], 1
+    // CHECK: ret { i16, i16 } %[[R1]]
+    a.checked_sub(b)
+}
+
+// Note that `shl` and `shr` in LLVM are already unchecked. So rather than
+// looking for no-wrap flags, we just need there to not be any masking.
+
+// CHECK-LABEL: @checked_shl_unsigned
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shl_unsigned(a: u32, b: u32) -> Option<u32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = shl i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shl(b)
+}
+
+// CHECK-LABEL: @checked_shr_unsigned
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shr_unsigned(a: u32, b: u32) -> Option<u32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = lshr i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shr(b)
+}
+
+// CHECK-LABEL: @checked_shl_signed
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shl_signed(a: i32, b: u32) -> Option<i32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = shl i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shl(b)
+}
+
+// CHECK-LABEL: @checked_shr_signed
+// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
+#[no_mangle]
+pub fn checked_shr_signed(a: i32, b: u32) -> Option<i32> {
+    // CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
+    // CHECK-DAG: %[[SHIFTED_P:.+]] = ashr i32 %a, %b
+    // CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
+    // CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
+
+    // CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
+    // CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
+    // CHECK: ret { i32, i32 } %[[R1]]
+    a.checked_shr(b)
+}
diff --git a/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-abort.mir b/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-abort.mir
index 845673601b2..af0a0efc2a6 100644
--- a/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-abort.mir
+++ b/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-abort.mir
@@ -5,60 +5,33 @@ fn checked_shl(_1: u32, _2: u32) -> Option<u32> {
     debug rhs => _2;
     let mut _0: std::option::Option<u32>;
     scope 1 (inlined core::num::<impl u32>::checked_shl) {
-        debug self => _1;
-        debug rhs => _2;
-        let mut _6: bool;
-        scope 2 {
-            debug a => _4;
-            debug b => _5;
-        }
-        scope 3 (inlined core::num::<impl u32>::overflowing_shl) {
-            debug self => _1;
-            debug rhs => _2;
-            let mut _4: u32;
-            let mut _5: bool;
-            scope 4 (inlined core::num::<impl u32>::wrapping_shl) {
-                debug self => _1;
-                debug rhs => _2;
-                let mut _3: u32;
-                scope 5 (inlined core::num::<impl u32>::unchecked_shl) {
-                    debug self => _1;
-                    debug rhs => _3;
-                }
-            }
+        let mut _3: bool;
+        let mut _4: u32;
+        scope 2 (inlined core::num::<impl u32>::unchecked_shl) {
         }
     }
 
     bb0: {
-        StorageLive(_4);
-        StorageLive(_5);
         StorageLive(_3);
-        _3 = BitAnd(_2, const 31_u32);
-        _4 = ShlUnchecked(_1, _3);
-        StorageDead(_3);
-        _5 = Ge(_2, const core::num::<impl u32>::BITS);
-        StorageLive(_6);
-        _6 = unlikely(move _5) -> [return: bb1, unwind unreachable];
+        _3 = Lt(_2, const core::num::<impl u32>::BITS);
+        switchInt(move _3) -> [0: bb1, otherwise: bb2];
     }
 
     bb1: {
-        switchInt(move _6) -> [0: bb2, otherwise: bb3];
+        _0 = const Option::<u32>::None;
+        goto -> bb3;
     }
 
     bb2: {
-        _0 = Option::<u32>::Some(_4);
-        goto -> bb4;
+        StorageLive(_4);
+        _4 = ShlUnchecked(_1, _2);
+        _0 = Option::<u32>::Some(move _4);
+        StorageDead(_4);
+        goto -> bb3;
    }
 
     bb3: {
-        _0 = const Option::<u32>::None;
-        goto -> bb4;
-    }
-
-    bb4: {
-        StorageDead(_6);
-        StorageDead(_5);
-        StorageDead(_4);
+        StorageDead(_3);
         return;
     }
 }
diff --git a/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir
index 845673601b2..af0a0efc2a6 100644
--- a/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir
+++ b/tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.panic-unwind.mir
@@ -5,60 +5,33 @@ fn checked_shl(_1: u32, _2: u32) -> Option<u32> {
     debug rhs => _2;
     let mut _0: std::option::Option<u32>;
     scope 1 (inlined core::num::<impl u32>::checked_shl) {
-        debug self => _1;
-        debug rhs => _2;
-        let mut _6: bool;
-        scope 2 {
-            debug a => _4;
-            debug b => _5;
-        }
-        scope 3 (inlined core::num::<impl u32>::overflowing_shl) {
-            debug self => _1;
-            debug rhs => _2;
-            let mut _4: u32;
-            let mut _5: bool;
-            scope 4 (inlined core::num::<impl u32>::wrapping_shl) {
-                debug self => _1;
-                debug rhs => _2;
-                let mut _3: u32;
-                scope 5 (inlined core::num::<impl u32>::unchecked_shl) {
-                    debug self => _1;
-                    debug rhs => _3;
-                }
-            }
+        let mut _3: bool;
+        let mut _4: u32;
+        scope 2 (inlined core::num::<impl u32>::unchecked_shl) {
         }
     }
 
     bb0: {
-        StorageLive(_4);
-        StorageLive(_5);
         StorageLive(_3);
-        _3 = BitAnd(_2, const 31_u32);
-        _4 = ShlUnchecked(_1, _3);
-        StorageDead(_3);
-        _5 = Ge(_2, const core::num::<impl u32>::BITS);
-        StorageLive(_6);
-        _6 = unlikely(move _5) -> [return: bb1, unwind unreachable];
+        _3 = Lt(_2, const core::num::<impl u32>::BITS);
+        switchInt(move _3) -> [0: bb1, otherwise: bb2];
    }
 
     bb1: {
-        switchInt(move _6) -> [0: bb2, otherwise: bb3];
+        _0 = const Option::<u32>::None;
+        goto -> bb3;
    }
 
     bb2: {
-        _0 = Option::<u32>::Some(_4);
-        goto -> bb4;
+        StorageLive(_4);
+        _4 = ShlUnchecked(_1, _2);
+        _0 = Option::<u32>::Some(move _4);
+        StorageDead(_4);
+        goto -> bb3;
    }
 
     bb3: {
-        _0 = const Option::<u32>::None;
-        goto -> bb4;
-    }
-
-    bb4: {
-        StorageDead(_6);
-        StorageDead(_5);
-        StorageDead(_4);
+        StorageDead(_3);
         return;
     }
 }
diff --git a/tests/mir-opt/pre-codegen/checked_ops.rs b/tests/mir-opt/pre-codegen/checked_ops.rs
index 8dd5c4b495e..56f8e3f8338 100644
--- a/tests/mir-opt/pre-codegen/checked_ops.rs
+++ b/tests/mir-opt/pre-codegen/checked_ops.rs
@@ -1,5 +1,5 @@
 // skip-filecheck
-//@ compile-flags: -O -Zmir-opt-level=2 -Cdebuginfo=2
+//@ compile-flags: -O -Zmir-opt-level=2
 // EMIT_MIR_FOR_EACH_PANIC_STRATEGY
 #![crate_type = "lib"]
```
