about summary refs log tree commit diff
path: root/compiler/rustc_const_eval/src
diff options
context:
space:
mode:
author Ralf Jung <post@ralfj.de> 2023-11-12 12:16:41 +0100
committer Ralf Jung <post@ralfj.de> 2023-11-12 12:49:46 +0100
commit 31493c70fad547a4ba056aa2eddb6ec4d5255093 (patch)
tree 347e9117ea51263773fb99f7e51d9df23a9bcc52 /compiler/rustc_const_eval/src
parent a04d56b36d8f634abd7bdd64dd859a30655f1818 (diff)
download rust-31493c70fad547a4ba056aa2eddb6ec4d5255093.tar.gz
download rust-31493c70fad547a4ba056aa2eddb6ec4d5255093.zip
interpret: simplify handling of shifts by no longer trying to handle signed and unsigned shift amounts in the same branch
Diffstat (limited to 'compiler/rustc_const_eval/src')
-rw-r--r--compiler/rustc_const_eval/src/interpret/operator.rs54
 1 file changed, 26 insertions, 28 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index a3ba9530f9d..5a3c85ab694 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -157,41 +157,35 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
         // Shift ops can have an RHS with a different numeric type.
         if matches!(bin_op, Shl | ShlUnchecked | Shr | ShrUnchecked) {
-            let size = u128::from(left_layout.size.bits());
-            // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
-            // zero-extended form). This matches the codegen backend:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
-            // The overflow check is also ignorant to the sign:
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
-            // This would behave rather strangely if we had integer types of size 256: a shift by
-            // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
-            // shift by -1i16 though would be considered overflowing. If we had integers of size
-            // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
-            // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
-            // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
-            // consistent results for the same value represented at different bit widths.
-            assert!(size <= 128);
-            let original_r = r;
-            let overflow = r >= size;
-            // The shift offset is implicitly masked to the type size, to make sure this operation
-            // is always defined. This is the one MIR operator that does *not* directly map to a
-            // single LLVM operation. See
-            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
-            // for the corresponding truncation in our codegen backends.
-            let r = r % size;
-            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
+            let size = left_layout.size.bits();
+            // The shift offset is implicitly masked to the type size. (This is the one MIR operator
+            // that does *not* directly map to a single LLVM operation.) Compute how much we
+            // actually shift and whether there was an overflow due to shifting too much.
+            let (shift_amount, overflow) = if right_layout.abi.is_signed() {
+                let shift_amount = self.sign_extend(r, right_layout) as i128;
+                let overflow = shift_amount < 0 || shift_amount >= i128::from(size);
+                let masked_amount = (shift_amount as u128) % u128::from(size);
+                debug_assert_eq!(overflow, shift_amount != (masked_amount as i128));
+                (masked_amount, overflow)
+            } else {
+                let shift_amount = r;
+                let masked_amount = shift_amount % u128::from(size);
+                (masked_amount, shift_amount != masked_amount)
+            };
+            let shift_amount = u32::try_from(shift_amount).unwrap(); // we masked so this will always fit
+            // Compute the shifted result.
             let result = if left_layout.abi.is_signed() {
                 let l = self.sign_extend(l, left_layout) as i128;
                 let result = match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 };
                 result as u128
             } else {
                 match bin_op {
-                    Shl | ShlUnchecked => l.checked_shl(r).unwrap(),
-                    Shr | ShrUnchecked => l.checked_shr(r).unwrap(),
+                    Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
+                    Shr | ShrUnchecked => l.checked_shr(shift_amount).unwrap(),
                     _ => bug!(),
                 }
             };
@@ -200,7 +194,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
                 throw_ub_custom!(
                     fluent::const_eval_overflow_shift,
-                    val = original_r,
+                    val = if right_layout.abi.is_signed() {
+                        (self.sign_extend(r, right_layout) as i128).to_string()
+                    } else {
+                        r.to_string()
+                    },
                     name = intrinsic_name
                 );
             }