about summary refs log tree commit diff
path: root/src/libcore/num
diff options
context:
space:
mode:
author: bors <bors@rust-lang.org> 2015-11-01 07:03:09 +0000
committer: bors <bors@rust-lang.org> 2015-11-01 07:03:09 +0000
commit: a5fbb3a25faeb08f50f571a6aa227f14a9d086c2 (patch)
tree: 8a3793a0770bb666a939b0d2c7057b346c202119 /src/libcore/num
parent: b252f4c826242cf552831155abb6350cefcb2347 (diff)
parent: 579420fbdd9951ae230a9def03e157d9b9957b2f (diff)
download: rust-a5fbb3a25faeb08f50f571a6aa227f14a9d086c2.tar.gz
rust-a5fbb3a25faeb08f50f571a6aa227f14a9d086c2.zip
Auto merge of #29316 - GBGamer:change-unchecked-div-generic, r=eddyb
Similarly to the simd intrinsics. I believe this is a better solution than #29288, and I could implement it as well for overflowing_add/sub/mul. Also rename from udiv/sdiv to div, and same for rem.
Diffstat (limited to 'src/libcore/num')
-rw-r--r--  src/libcore/num/bignum.rs   |  11
-rw-r--r--  src/libcore/num/mod.rs      | 170
-rw-r--r--  src/libcore/num/wrapping.rs | 182
3 files changed, 343 insertions, 20 deletions
diff --git a/src/libcore/num/bignum.rs b/src/libcore/num/bignum.rs
index 18b34e24fcb..5d15ada4e75 100644
--- a/src/libcore/num/bignum.rs
+++ b/src/libcore/num/bignum.rs
@@ -55,6 +55,7 @@ macro_rules! impl_full_ops {
     ($($ty:ty: add($addfn:path), mul/div($bigty:ident);)*) => (
         $(
             impl FullOps for $ty {
+                #[cfg(stage0)]
                 fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
                     // this cannot overflow, the output is between 0 and 2*2^nbits - 1
                     // FIXME will LLVM optimize this into ADC or similar???
@@ -62,6 +63,16 @@ macro_rules! impl_full_ops {
                     let (v, carry2) = unsafe { $addfn(v, if carry {1} else {0}) };
                     (carry1 || carry2, v)
                 }
+                #[cfg(not(stage0))]
+                fn full_add(self, other: $ty, carry: bool) -> (bool, $ty) {
+                    // this cannot overflow, the output is between 0 and 2*2^nbits - 1
+                    // FIXME will LLVM optimize this into ADC or similar???
+                    let (v, carry1) = unsafe { intrinsics::add_with_overflow(self, other) };
+                    let (v, carry2) = unsafe {
+                        intrinsics::add_with_overflow(v, if carry {1} else {0})
+                    };
+                    (carry1 || carry2, v)
+                }
 
                 fn full_mul(self, other: $ty, carry: $ty) -> ($ty, $ty) {
                     // this cannot overflow, the output is between 0 and 2^nbits * (2^nbits - 1)
diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs
index 801315d86db..c397d02d245 100644
--- a/src/libcore/num/mod.rs
+++ b/src/libcore/num/mod.rs
@@ -103,6 +103,11 @@ macro_rules! zero_one_impl_float {
 }
 zero_one_impl_float! { f32 f64 }
 
+// Just for stage0; a byte swap on a byte is a no-op
+// Delete this once it becomes unused
+#[cfg(stage0)]
+unsafe fn bswap8(x: u8) -> u8 { x }
+
 macro_rules! checked_op {
     ($U:ty, $op:path, $x:expr, $y:expr) => {{
         let (result, overflowed) = unsafe { $op($x as $U, $y as $U) };
@@ -110,10 +115,6 @@ macro_rules! checked_op {
     }}
 }
 
-/// Swapping a single byte is a no-op. This is marked as `unsafe` for
-/// consistency with the other `bswap` intrinsics.
-unsafe fn bswap8(x: u8) -> u8 { x }
-
 // `Int` + `SignedInt` implemented for signed integers
 macro_rules! int_impl {
     ($ActualT:ty, $UnsignedT:ty, $BITS:expr,
@@ -611,54 +612,110 @@ macro_rules! int_impl {
 }
 
 #[lang = "i8"]
+#[cfg(stage0)]
 impl i8 {
     int_impl! { i8, u8, 8,
         intrinsics::i8_add_with_overflow,
         intrinsics::i8_sub_with_overflow,
         intrinsics::i8_mul_with_overflow }
 }
+#[lang = "i8"]
+#[cfg(not(stage0))]
+impl i8 {
+    int_impl! { i8, u8, 8,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[lang = "i16"]
+#[cfg(stage0)]
 impl i16 {
     int_impl! { i16, u16, 16,
         intrinsics::i16_add_with_overflow,
         intrinsics::i16_sub_with_overflow,
         intrinsics::i16_mul_with_overflow }
 }
+#[lang = "i16"]
+#[cfg(not(stage0))]
+impl i16 {
+    int_impl! { i16, u16, 16,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[lang = "i32"]
+#[cfg(stage0)]
 impl i32 {
     int_impl! { i32, u32, 32,
         intrinsics::i32_add_with_overflow,
         intrinsics::i32_sub_with_overflow,
         intrinsics::i32_mul_with_overflow }
 }
+#[lang = "i32"]
+#[cfg(not(stage0))]
+impl i32 {
+    int_impl! { i32, u32, 32,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[lang = "i64"]
+#[cfg(stage0)]
 impl i64 {
     int_impl! { i64, u64, 64,
         intrinsics::i64_add_with_overflow,
         intrinsics::i64_sub_with_overflow,
         intrinsics::i64_mul_with_overflow }
 }
+#[lang = "i64"]
+#[cfg(not(stage0))]
+impl i64 {
+    int_impl! { i64, u64, 64,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[cfg(target_pointer_width = "32")]
 #[lang = "isize"]
+#[cfg(stage0)]
 impl isize {
     int_impl! { i32, u32, 32,
         intrinsics::i32_add_with_overflow,
         intrinsics::i32_sub_with_overflow,
         intrinsics::i32_mul_with_overflow }
 }
+#[cfg(target_pointer_width = "32")]
+#[lang = "isize"]
+#[cfg(not(stage0))]
+impl isize {
+    int_impl! { i32, u32, 32,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[cfg(target_pointer_width = "64")]
 #[lang = "isize"]
+#[cfg(stage0)]
 impl isize {
     int_impl! { i64, u64, 64,
         intrinsics::i64_add_with_overflow,
         intrinsics::i64_sub_with_overflow,
         intrinsics::i64_mul_with_overflow }
 }
+#[cfg(target_pointer_width = "64")]
+#[lang = "isize"]
+#[cfg(not(stage0))]
+impl isize {
+    int_impl! { i64, u64, 64,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 // `Int` + `UnsignedInt` implemented for signed integers
 macro_rules! uint_impl {
@@ -744,6 +801,25 @@ macro_rules! uint_impl {
             unsafe { $ctlz(self as $ActualT) as u32 }
         }
 
+        #[stable(feature = "rust1", since = "1.0.0")]
+        #[cfg(stage0)]
+        #[inline]
+        pub fn trailing_zeros(self) -> u32 {
+            // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
+            // emits two conditional moves on x86_64. By promoting the value to
+            // u16 and setting bit 8, we get better code without any conditional
+            // operations.
+            // FIXME: There's a LLVM patch (http://reviews.llvm.org/D9284)
+            // pending, remove this workaround once LLVM generates better code
+            // for cttz8.
+            unsafe {
+                if $BITS == 8 {
+                    intrinsics::cttz16(self as u16 | 0x100) as u32
+                } else {
+                    $cttz(self as $ActualT) as u32
+                }
+            }
+        }
         /// Returns the number of trailing zeros in the binary representation
         /// of `self`.
         ///
@@ -755,6 +831,7 @@ macro_rules! uint_impl {
         /// assert_eq!(n.trailing_zeros(), 3);
         /// ```
         #[stable(feature = "rust1", since = "1.0.0")]
+        #[cfg(not(stage0))]
         #[inline]
         pub fn trailing_zeros(self) -> u32 {
             // As of LLVM 3.6 the codegen for the zero-safe cttz8 intrinsic
@@ -766,9 +843,9 @@ macro_rules! uint_impl {
             // for cttz8.
             unsafe {
                 if $BITS == 8 {
-                    intrinsics::cttz16(self as u16 | 0x100) as u32
+                    intrinsics::cttz(self as u16 | 0x100) as u32
                 } else {
-                    $cttz(self as $ActualT) as u32
+                    intrinsics::cttz(self) as u32
                 }
             }
         }
@@ -1163,6 +1240,7 @@ macro_rules! uint_impl {
 }
 
 #[lang = "u8"]
+#[cfg(stage0)]
 impl u8 {
     uint_impl! { u8, 8,
         intrinsics::ctpop8,
@@ -1173,8 +1251,21 @@ impl u8 {
         intrinsics::u8_sub_with_overflow,
         intrinsics::u8_mul_with_overflow }
 }
+#[lang = "u8"]
+#[cfg(not(stage0))]
+impl u8 {
+    uint_impl! { u8, 8,
+        intrinsics::ctpop,
+        intrinsics::ctlz,
+        intrinsics::cttz,
+        intrinsics::bswap,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[lang = "u16"]
+#[cfg(stage0)]
 impl u16 {
     uint_impl! { u16, 16,
         intrinsics::ctpop16,
@@ -1185,8 +1276,21 @@ impl u16 {
         intrinsics::u16_sub_with_overflow,
         intrinsics::u16_mul_with_overflow }
 }
+#[lang = "u16"]
+#[cfg(not(stage0))]
+impl u16 {
+    uint_impl! { u16, 16,
+        intrinsics::ctpop,
+        intrinsics::ctlz,
+        intrinsics::cttz,
+        intrinsics::bswap,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[lang = "u32"]
+#[cfg(stage0)]
 impl u32 {
     uint_impl! { u32, 32,
         intrinsics::ctpop32,
@@ -1197,9 +1301,21 @@ impl u32 {
         intrinsics::u32_sub_with_overflow,
         intrinsics::u32_mul_with_overflow }
 }
-
+#[lang = "u32"]
+#[cfg(not(stage0))]
+impl u32 {
+    uint_impl! { u32, 32,
+        intrinsics::ctpop,
+        intrinsics::ctlz,
+        intrinsics::cttz,
+        intrinsics::bswap,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[lang = "u64"]
+#[cfg(stage0)]
 impl u64 {
     uint_impl! { u64, 64,
         intrinsics::ctpop64,
@@ -1210,9 +1326,22 @@ impl u64 {
         intrinsics::u64_sub_with_overflow,
         intrinsics::u64_mul_with_overflow }
 }
+#[lang = "u64"]
+#[cfg(not(stage0))]
+impl u64 {
+    uint_impl! { u64, 64,
+        intrinsics::ctpop,
+        intrinsics::ctlz,
+        intrinsics::cttz,
+        intrinsics::bswap,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[cfg(target_pointer_width = "32")]
 #[lang = "usize"]
+#[cfg(stage0)]
 impl usize {
     uint_impl! { u32, 32,
         intrinsics::ctpop32,
@@ -1223,9 +1352,23 @@ impl usize {
         intrinsics::u32_sub_with_overflow,
         intrinsics::u32_mul_with_overflow }
 }
+#[cfg(target_pointer_width = "32")]
+#[lang = "usize"]
+#[cfg(not(stage0))]
+impl usize {
+    uint_impl! { u32, 32,
+        intrinsics::ctpop,
+        intrinsics::ctlz,
+        intrinsics::cttz,
+        intrinsics::bswap,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 #[cfg(target_pointer_width = "64")]
 #[lang = "usize"]
+#[cfg(stage0)]
 impl usize {
     uint_impl! { u64, 64,
         intrinsics::ctpop64,
@@ -1236,6 +1379,19 @@ impl usize {
         intrinsics::u64_sub_with_overflow,
         intrinsics::u64_mul_with_overflow }
 }
+#[cfg(target_pointer_width = "64")]
+#[lang = "usize"]
+#[cfg(not(stage0))]
+impl usize {
+    uint_impl! { u64, 64,
+        intrinsics::ctpop,
+        intrinsics::ctlz,
+        intrinsics::cttz,
+        intrinsics::bswap,
+        intrinsics::add_with_overflow,
+        intrinsics::sub_with_overflow,
+        intrinsics::mul_with_overflow }
+}
 
 /// Used for representing the classification of floating point numbers
 #[derive(Copy, Clone, PartialEq, Debug)]
diff --git a/src/libcore/num/wrapping.rs b/src/libcore/num/wrapping.rs
index 0e8ced0aa19..88f71e63da6 100644
--- a/src/libcore/num/wrapping.rs
+++ b/src/libcore/num/wrapping.rs
@@ -12,23 +12,35 @@
 #![unstable(feature = "wrapping", reason = "may be removed or relocated",
             issue = "27755")]
 
+#[cfg(stage0)]
+pub use intrinsics::{
+    u8_add_with_overflow, i8_add_with_overflow,
+    u16_add_with_overflow, i16_add_with_overflow,
+    u32_add_with_overflow, i32_add_with_overflow,
+    u64_add_with_overflow, i64_add_with_overflow,
+
+    u8_sub_with_overflow, i8_sub_with_overflow,
+    u16_sub_with_overflow, i16_sub_with_overflow,
+    u32_sub_with_overflow, i32_sub_with_overflow,
+    u64_sub_with_overflow, i64_sub_with_overflow,
+
+    u8_mul_with_overflow, i8_mul_with_overflow,
+    u16_mul_with_overflow, i16_mul_with_overflow,
+    u32_mul_with_overflow, i32_mul_with_overflow,
+    u64_mul_with_overflow, i64_mul_with_overflow,
+};
+
+#[cfg(not(stage0))]
+pub use intrinsics::{
+    add_with_overflow,
+    sub_with_overflow,
+    mul_with_overflow,
+};
+
 use super::Wrapping;
 
 use ops::*;
 
-use intrinsics::{i8_add_with_overflow, u8_add_with_overflow};
-use intrinsics::{i16_add_with_overflow, u16_add_with_overflow};
-use intrinsics::{i32_add_with_overflow, u32_add_with_overflow};
-use intrinsics::{i64_add_with_overflow, u64_add_with_overflow};
-use intrinsics::{i8_sub_with_overflow, u8_sub_with_overflow};
-use intrinsics::{i16_sub_with_overflow, u16_sub_with_overflow};
-use intrinsics::{i32_sub_with_overflow, u32_sub_with_overflow};
-use intrinsics::{i64_sub_with_overflow, u64_sub_with_overflow};
-use intrinsics::{i8_mul_with_overflow, u8_mul_with_overflow};
-use intrinsics::{i16_mul_with_overflow, u16_mul_with_overflow};
-use intrinsics::{i32_mul_with_overflow, u32_mul_with_overflow};
-use intrinsics::{i64_mul_with_overflow, u64_mul_with_overflow};
-
 use ::{i8,i16,i32,i64};
 
 pub trait OverflowingOps {
@@ -191,23 +203,47 @@ macro_rules! signed_overflowing_impl {
     ($($t:ident)*) => ($(
         impl OverflowingOps for $t {
             #[inline(always)]
+            #[cfg(stage0)]
             fn overflowing_add(self, rhs: $t) -> ($t, bool) {
                 unsafe {
                     concat_idents!($t, _add_with_overflow)(self, rhs)
                 }
             }
             #[inline(always)]
+            #[cfg(not(stage0))]
+            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
+                unsafe {
+                    add_with_overflow(self, rhs)
+                }
+            }
+            #[inline(always)]
+            #[cfg(stage0)]
             fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
                 unsafe {
                     concat_idents!($t, _sub_with_overflow)(self, rhs)
                 }
             }
             #[inline(always)]
+            #[cfg(not(stage0))]
+            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
+                unsafe {
+                    sub_with_overflow(self, rhs)
+                }
+            }
+            #[inline(always)]
+            #[cfg(stage0)]
             fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
                 unsafe {
                     concat_idents!($t, _mul_with_overflow)(self, rhs)
                 }
             }
+            #[inline(always)]
+            #[cfg(not(stage0))]
+            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
+                unsafe {
+                    mul_with_overflow(self, rhs)
+                }
+            }
 
             #[inline(always)]
             fn overflowing_div(self, rhs: $t) -> ($t, bool) {
@@ -253,23 +289,47 @@ macro_rules! unsigned_overflowing_impl {
     ($($t:ident)*) => ($(
         impl OverflowingOps for $t {
             #[inline(always)]
+            #[cfg(stage0)]
             fn overflowing_add(self, rhs: $t) -> ($t, bool) {
                 unsafe {
                     concat_idents!($t, _add_with_overflow)(self, rhs)
                 }
             }
             #[inline(always)]
+            #[cfg(not(stage0))]
+            fn overflowing_add(self, rhs: $t) -> ($t, bool) {
+                unsafe {
+                    add_with_overflow(self, rhs)
+                }
+            }
+            #[inline(always)]
+            #[cfg(stage0)]
             fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
                 unsafe {
                     concat_idents!($t, _sub_with_overflow)(self, rhs)
                 }
             }
             #[inline(always)]
+            #[cfg(not(stage0))]
+            fn overflowing_sub(self, rhs: $t) -> ($t, bool) {
+                unsafe {
+                    sub_with_overflow(self, rhs)
+                }
+            }
+            #[inline(always)]
+            #[cfg(stage0)]
             fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
                 unsafe {
                     concat_idents!($t, _mul_with_overflow)(self, rhs)
                 }
             }
+            #[inline(always)]
+            #[cfg(not(stage0))]
+            fn overflowing_mul(self, rhs: $t) -> ($t, bool) {
+                unsafe {
+                    mul_with_overflow(self, rhs)
+                }
+            }
 
             #[inline(always)]
             fn overflowing_div(self, rhs: $t) -> ($t, bool) {
@@ -305,6 +365,7 @@ unsigned_overflowing_impl! { u8 u16 u32 u64 }
 #[cfg(target_pointer_width = "64")]
 impl OverflowingOps for usize {
     #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_add(self, rhs: usize) -> (usize, bool) {
         unsafe {
             let res = u64_add_with_overflow(self as u64, rhs as u64);
@@ -312,6 +373,14 @@ impl OverflowingOps for usize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_add(self, rhs: usize) -> (usize, bool) {
+        unsafe {
+            add_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
         unsafe {
             let res = u64_sub_with_overflow(self as u64, rhs as u64);
@@ -319,6 +388,14 @@ impl OverflowingOps for usize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
+        unsafe {
+            sub_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
         unsafe {
             let res = u64_mul_with_overflow(self as u64, rhs as u64);
@@ -326,6 +403,13 @@ impl OverflowingOps for usize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
+        unsafe {
+            mul_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
     fn overflowing_div(self, rhs: usize) -> (usize, bool) {
         let (r, f) = (self as u64).overflowing_div(rhs as u64);
         (r as usize, f)
@@ -355,6 +439,7 @@ impl OverflowingOps for usize {
 #[cfg(target_pointer_width = "32")]
 impl OverflowingOps for usize {
     #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_add(self, rhs: usize) -> (usize, bool) {
         unsafe {
             let res = u32_add_with_overflow(self as u32, rhs as u32);
@@ -362,6 +447,14 @@ impl OverflowingOps for usize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_add(self, rhs: usize) -> (usize, bool) {
+        unsafe {
+            add_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
         unsafe {
             let res = u32_sub_with_overflow(self as u32, rhs as u32);
@@ -369,6 +462,14 @@ impl OverflowingOps for usize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_sub(self, rhs: usize) -> (usize, bool) {
+        unsafe {
+            sub_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
         unsafe {
             let res = u32_mul_with_overflow(self as u32, rhs as u32);
@@ -376,6 +477,13 @@ impl OverflowingOps for usize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_mul(self, rhs: usize) -> (usize, bool) {
+        unsafe {
+            mul_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
     fn overflowing_div(self, rhs: usize) -> (usize, bool) {
         let (r, f) = (self as u32).overflowing_div(rhs as u32);
         (r as usize, f)
@@ -405,6 +513,7 @@ impl OverflowingOps for usize {
 #[cfg(target_pointer_width = "64")]
 impl OverflowingOps for isize {
     #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_add(self, rhs: isize) -> (isize, bool) {
         unsafe {
             let res = i64_add_with_overflow(self as i64, rhs as i64);
@@ -412,6 +521,14 @@ impl OverflowingOps for isize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_add(self, rhs: isize) -> (isize, bool) {
+        unsafe {
+            add_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
         unsafe {
             let res = i64_sub_with_overflow(self as i64, rhs as i64);
@@ -419,6 +536,14 @@ impl OverflowingOps for isize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
+        unsafe {
+            sub_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
         unsafe {
             let res = i64_mul_with_overflow(self as i64, rhs as i64);
@@ -426,6 +551,13 @@ impl OverflowingOps for isize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
+        unsafe {
+            mul_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
     fn overflowing_div(self, rhs: isize) -> (isize, bool) {
         let (r, f) = (self as i64).overflowing_div(rhs as i64);
         (r as isize, f)
@@ -455,6 +587,7 @@ impl OverflowingOps for isize {
 #[cfg(target_pointer_width = "32")]
 impl OverflowingOps for isize {
     #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_add(self, rhs: isize) -> (isize, bool) {
         unsafe {
             let res = i32_add_with_overflow(self as i32, rhs as i32);
@@ -462,6 +595,14 @@ impl OverflowingOps for isize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_add(self, rhs: isize) -> (isize, bool) {
+        unsafe {
+            add_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
         unsafe {
             let res = i32_sub_with_overflow(self as i32, rhs as i32);
@@ -469,6 +610,14 @@ impl OverflowingOps for isize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_sub(self, rhs: isize) -> (isize, bool) {
+        unsafe {
+            sub_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
+    #[cfg(stage0)]
     fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
         unsafe {
             let res = i32_mul_with_overflow(self as i32, rhs as i32);
@@ -476,6 +625,13 @@ impl OverflowingOps for isize {
         }
     }
     #[inline(always)]
+    #[cfg(not(stage0))]
+    fn overflowing_mul(self, rhs: isize) -> (isize, bool) {
+        unsafe {
+            mul_with_overflow(self, rhs)
+        }
+    }
+    #[inline(always)]
     fn overflowing_div(self, rhs: isize) -> (isize, bool) {
         let (r, f) = (self as i32).overflowing_div(rhs as i32);
         (r as isize, f)