diff options
| author | Tim Neumann <mail@timnn.me> | 2017-03-14 16:35:11 +0100 |
|---|---|---|
| committer | Tim Neumann <mail@timnn.me> | 2017-03-15 06:59:09 +0100 |
| commit | cc23d17ce9288cee77f0441018a248a6bd106880 (patch) | |
| tree | af8f25a9b894fc143c1b2f1a88e2d9b94335fa5a /src/libcore/num | |
| parent | 6f10e2f63de720468e2b4bfcb275e4b90b1f9870 (diff) | |
| download | rust-cc23d17ce9288cee77f0441018a248a6bd106880.tar.gz rust-cc23d17ce9288cee77f0441018a248a6bd106880.zip | |
make shift builtins panic-free with new unchecked_sh* intrinsics
Also update some 128 bit builtins to be panic-free without relying on the const evaluator.
Diffstat (limited to 'src/libcore/num')
| -rw-r--r-- | src/libcore/num/mod.rs | 113 |
1 file changed, 95 insertions, 18 deletions
diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs index 97ea6bb347b..d12002fdfa7 100644 --- a/src/libcore/num/mod.rs +++ b/src/libcore/num/mod.rs @@ -177,7 +177,7 @@ macro_rules! checked_op { // `Int` + `SignedInt` implemented for signed integers macro_rules! int_impl { - ($ActualT:ident, $UnsignedT:ty, $BITS:expr, + ($SelfT:ty, $ActualT:ident, $UnsignedT:ty, $BITS:expr, $add_with_overflow:path, $sub_with_overflow:path, $mul_with_overflow:path) => { @@ -850,6 +850,16 @@ macro_rules! int_impl { /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] + #[cfg(not(stage0))] + pub fn wrapping_shl(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT) + } + } + + /// Stage 0 + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[cfg(stage0)] pub fn wrapping_shl(self, rhs: u32) -> Self { self.overflowing_shl(rhs).0 } @@ -875,6 +885,16 @@ macro_rules! int_impl { /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] + #[cfg(not(stage0))] + pub fn wrapping_shr(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT) + } + } + + /// Stage 0 + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[cfg(stage0)] pub fn wrapping_shr(self, rhs: u32) -> Self { self.overflowing_shr(rhs).0 } @@ -1089,6 +1109,15 @@ macro_rules! int_impl { /// ``` #[inline] #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(not(stage0))] + pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + } + + /// Stage 0 + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(stage0)] pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1))) } @@ -1111,6 +1140,15 @@ macro_rules! 
int_impl { /// ``` #[inline] #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(not(stage0))] + pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + } + + /// Stage 0 + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(stage0)] pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1))) } @@ -1268,7 +1306,7 @@ macro_rules! int_impl { #[lang = "i8"] impl i8 { - int_impl! { i8, u8, 8, + int_impl! { i8, i8, u8, 8, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1276,7 +1314,7 @@ impl i8 { #[lang = "i16"] impl i16 { - int_impl! { i16, u16, 16, + int_impl! { i16, i16, u16, 16, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1284,7 +1322,7 @@ impl i16 { #[lang = "i32"] impl i32 { - int_impl! { i32, u32, 32, + int_impl! { i32, i32, u32, 32, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1292,7 +1330,7 @@ impl i32 { #[lang = "i64"] impl i64 { - int_impl! { i64, u64, 64, + int_impl! { i64, i64, u64, 64, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1300,7 +1338,7 @@ impl i64 { #[lang = "i128"] impl i128 { - int_impl! { i128, u128, 128, + int_impl! { i128, i128, u128, 128, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1309,7 +1347,7 @@ impl i128 { #[cfg(target_pointer_width = "16")] #[lang = "isize"] impl isize { - int_impl! { i16, u16, 16, + int_impl! { isize, i16, u16, 16, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1318,7 +1356,7 @@ impl isize { #[cfg(target_pointer_width = "32")] #[lang = "isize"] impl isize { - int_impl! { i32, u32, 32, + int_impl! 
{ isize, i32, u32, 32, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1327,7 +1365,7 @@ impl isize { #[cfg(target_pointer_width = "64")] #[lang = "isize"] impl isize { - int_impl! { i64, u64, 64, + int_impl! { isize, i64, u64, 64, intrinsics::add_with_overflow, intrinsics::sub_with_overflow, intrinsics::mul_with_overflow } @@ -1335,7 +1373,7 @@ impl isize { // `Int` + `UnsignedInt` implemented for unsigned integers macro_rules! uint_impl { - ($ActualT:ty, $BITS:expr, + ($SelfT:ty, $ActualT:ty, $BITS:expr, $ctpop:path, $ctlz:path, $cttz:path, @@ -1978,6 +2016,16 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] + #[cfg(not(stage0))] + pub fn wrapping_shl(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shl(self, (rhs & ($BITS - 1)) as $SelfT) + } + } + + /// Stage 0 + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[cfg(stage0)] pub fn wrapping_shl(self, rhs: u32) -> Self { self.overflowing_shl(rhs).0 } @@ -2003,6 +2051,16 @@ macro_rules! uint_impl { /// ``` #[stable(feature = "num_wrapping", since = "1.2.0")] #[inline(always)] + #[cfg(not(stage0))] + pub fn wrapping_shr(self, rhs: u32) -> Self { + unsafe { + intrinsics::unchecked_shr(self, (rhs & ($BITS - 1)) as $SelfT) + } + } + + /// Stage 0 + #[stable(feature = "num_wrapping", since = "1.2.0")] + #[cfg(stage0)] pub fn wrapping_shr(self, rhs: u32) -> Self { self.overflowing_shr(rhs).0 } @@ -2170,6 +2228,15 @@ macro_rules! uint_impl { /// ``` #[inline] #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(not(stage0))] + pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shl(rhs), (rhs > ($BITS - 1))) + } + + /// Stage 0 + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(stage0)] pub fn overflowing_shl(self, rhs: u32) -> (Self, bool) { (self << (rhs & ($BITS - 1)), (rhs > ($BITS - 1))) } @@ -2192,6 +2259,16 @@ macro_rules! 
uint_impl { /// ``` #[inline] #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(not(stage0))] + pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { + (self.wrapping_shr(rhs), (rhs > ($BITS - 1))) + + } + + /// Stage 0 + #[inline] + #[stable(feature = "wrapping", since = "1.7.0")] + #[cfg(stage0)] pub fn overflowing_shr(self, rhs: u32) -> (Self, bool) { (self >> (rhs & ($BITS - 1)), (rhs > ($BITS - 1))) } @@ -2292,7 +2369,7 @@ macro_rules! uint_impl { #[lang = "u8"] impl u8 { - uint_impl! { u8, 8, + uint_impl! { u8, u8, 8, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, @@ -2304,7 +2381,7 @@ impl u8 { #[lang = "u16"] impl u16 { - uint_impl! { u16, 16, + uint_impl! { u16, u16, 16, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, @@ -2316,7 +2393,7 @@ impl u16 { #[lang = "u32"] impl u32 { - uint_impl! { u32, 32, + uint_impl! { u32, u32, 32, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, @@ -2328,7 +2405,7 @@ impl u32 { #[lang = "u64"] impl u64 { - uint_impl! { u64, 64, + uint_impl! { u64, u64, 64, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, @@ -2340,7 +2417,7 @@ impl u64 { #[lang = "u128"] impl u128 { - uint_impl! { u128, 128, + uint_impl! { u128, u128, 128, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, @@ -2353,7 +2430,7 @@ impl u128 { #[cfg(target_pointer_width = "16")] #[lang = "usize"] impl usize { - uint_impl! { u16, 16, + uint_impl! { usize, u16, 16, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, @@ -2365,7 +2442,7 @@ impl usize { #[cfg(target_pointer_width = "32")] #[lang = "usize"] impl usize { - uint_impl! { u32, 32, + uint_impl! { usize, u32, 32, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, @@ -2378,7 +2455,7 @@ impl usize { #[cfg(target_pointer_width = "64")] #[lang = "usize"] impl usize { - uint_impl! { u64, 64, + uint_impl! { usize, u64, 64, intrinsics::ctpop, intrinsics::ctlz, intrinsics::cttz, |
