diff options
| author | bors <bors@rust-lang.org> | 2020-03-04 04:10:58 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2020-03-04 04:10:58 +0000 |
| commit | 4d71c164a89b705df6affd31a5262c832d1bc48d (patch) | |
| tree | 4861bf04a627a9af944ad786b5930e3a4ccbe5c1 /src | |
| parent | 592e9c37008c2389451d28874a748f5b38612ca5 (diff) | |
| parent | 0e157380ae2a7af0c457aea1c8be22161899ea1d (diff) | |
| download | rust-4d71c164a89b705df6affd31a5262c832d1bc48d.tar.gz rust-4d71c164a89b705df6affd31a5262c832d1bc48d.zip | |
Auto merge of #69550 - RalfJung:scalar, r=oli-obk
interpret engine: Scalar cleanup

* Remove `to_ptr`
* Make `to_bits` private

r? @oli-obk
Diffstat (limited to 'src')
| -rw-r--r-- | src/librustc/mir/interpret/value.rs | 42 | ||||
| -rw-r--r-- | src/librustc/ty/sty.rs | 2 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/intrinsics.rs | 2 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/operand.rs | 63 | ||||
| -rw-r--r-- | src/librustc_mir/interpret/validity.rs | 15 | ||||
| -rw-r--r-- | src/librustc_mir/transform/const_prop.rs | 4 | ||||
| -rw-r--r-- | src/librustc_mir_build/hair/pattern/_match.rs | 28 |
7 files changed, 69 insertions(+), 87 deletions(-)
diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 2c146b5d7b4..2be60d35af3 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -170,6 +170,10 @@ impl<Tag> From<Double> for Scalar<Tag> { } impl Scalar<()> { + /// Make sure the `data` fits in `size`. + /// This is guaranteed by all constructors here, but since the enum variants are public, + /// it could still be violated (even though no code outside this file should + /// construct `Scalar`s). #[inline(always)] fn check_data(data: u128, size: u8) { debug_assert_eq!( @@ -364,10 +368,10 @@ impl<'tcx, Tag> Scalar<Tag> { target_size: Size, cx: &impl HasDataLayout, ) -> Result<u128, Pointer<Tag>> { + assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); match self { Scalar::Raw { data, size } => { assert_eq!(target_size.bytes(), size as u64); - assert_ne!(size, 0, "you should never look at the bits of a ZST"); Scalar::check_data(data, size); Ok(data) } @@ -378,19 +382,15 @@ impl<'tcx, Tag> Scalar<Tag> { } } - #[inline(always)] - pub fn check_raw(data: u128, size: u8, target_size: Size) { - assert_eq!(target_size.bytes(), size as u64); - assert_ne!(size, 0, "you should never look at the bits of a ZST"); - Scalar::check_data(data, size); - } - - /// Do not call this method! Use either `assert_bits` or `force_bits`. + /// This method is intentionally private! + /// It is just a helper for other methods in this file. 
#[inline] - pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> { + fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> { + assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); match self { Scalar::Raw { data, size } => { - Self::check_raw(data, size, target_size); + assert_eq!(target_size.bytes(), size as u64); + Scalar::check_data(data, size); Ok(data) } Scalar::Ptr(_) => throw_unsup!(ReadPointerAsBytes), @@ -402,22 +402,14 @@ impl<'tcx, Tag> Scalar<Tag> { self.to_bits(target_size).expect("expected Raw bits but got a Pointer") } - /// Do not call this method! Use either `assert_ptr` or `force_ptr`. - /// This method is intentionally private, do not make it public. #[inline] - fn to_ptr(self) -> InterpResult<'tcx, Pointer<Tag>> { + pub fn assert_ptr(self) -> Pointer<Tag> { match self { - Scalar::Raw { data: 0, .. } => throw_unsup!(InvalidNullPointerUsage), - Scalar::Raw { .. } => throw_unsup!(ReadBytesAsPointer), - Scalar::Ptr(p) => Ok(p), + Scalar::Ptr(p) => p, + Scalar::Raw { .. } => bug!("expected a Pointer but got Raw bits"), } } - #[inline(always)] - pub fn assert_ptr(self) -> Pointer<Tag> { - self.to_ptr().expect("expected a Pointer but got Raw bits") - } - /// Do not call this method! Dispatch based on the type instead. #[inline] pub fn is_bits(self) -> bool { @@ -595,12 +587,6 @@ impl<'tcx, Tag> ScalarMaybeUndef<Tag> { } } - /// Do not call this method! Use either `assert_bits` or `force_bits`. 
- #[inline(always)] - pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> { - self.not_undef()?.to_bits(target_size) - } - #[inline(always)] pub fn to_bool(self) -> InterpResult<'tcx, bool> { self.not_undef()?.to_bool() diff --git a/src/librustc/ty/sty.rs b/src/librustc/ty/sty.rs index 9f60c9cdbcc..ab2c98c89b4 100644 --- a/src/librustc/ty/sty.rs +++ b/src/librustc/ty/sty.rs @@ -2571,7 +2571,7 @@ impl<'tcx> ConstKind<'tcx> { #[inline] pub fn try_to_bits(&self, size: ty::layout::Size) -> Option<u128> { - self.try_to_scalar()?.to_bits(size).ok() + if let ConstKind::Value(val) = self { val.try_to_bits(size) } else { None } } } diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index cd06cf01bfa..d63abdc3562 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -384,7 +384,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`. // First, check x % y != 0 (or if that computation overflows). let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?; - if overflow || res.to_bits(a.layout.size)? != 0 { + if overflow || res.assert_bits(a.layout.size) != 0 { // Then, check if `b` is -1, which is the "min_value / -1" case. 
let minus1 = Scalar::from_int(-1, dest.layout.size); let b_scalar = b.to_scalar().unwrap(); diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index df4b9b16186..44b46d65bf1 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -96,40 +96,40 @@ pub struct ImmTy<'tcx, Tag = ()> { impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match &self.imm { - Immediate::Scalar(ScalarMaybeUndef::Scalar(s)) => match s.to_bits(self.layout.size) { - Ok(s) => { - match self.layout.ty.kind { - ty::Int(_) => { - return write!( - fmt, - "{}", - super::sign_extend(s, self.layout.size) as i128, - ); - } - ty::Uint(_) => return write!(fmt, "{}", s), - ty::Bool if s == 0 => return fmt.write_str("false"), - ty::Bool if s == 1 => return fmt.write_str("true"), - ty::Char => { - if let Some(c) = u32::try_from(s).ok().and_then(std::char::from_u32) { - return write!(fmt, "{}", c); - } + // We cannot use `to_bits_or_ptr` as we do not have a `tcx`. + // So we use `is_bits` and circumvent a bunch of sanity checking -- but + // this is anyway only for printing. 
+ Immediate::Scalar(ScalarMaybeUndef::Scalar(s)) if s.is_ptr() => { + fmt.write_str("{pointer}") + } + Immediate::Scalar(ScalarMaybeUndef::Scalar(s)) => { + let s = s.assert_bits(self.layout.size); + match self.layout.ty.kind { + ty::Int(_) => { + return write!(fmt, "{}", super::sign_extend(s, self.layout.size) as i128,); + } + ty::Uint(_) => return write!(fmt, "{}", s), + ty::Bool if s == 0 => return fmt.write_str("false"), + ty::Bool if s == 1 => return fmt.write_str("true"), + ty::Char => { + if let Some(c) = u32::try_from(s).ok().and_then(std::char::from_u32) { + return write!(fmt, "{}", c); } - ty::Float(ast::FloatTy::F32) => { - if let Ok(u) = u32::try_from(s) { - return write!(fmt, "{}", f32::from_bits(u)); - } + } + ty::Float(ast::FloatTy::F32) => { + if let Ok(u) = u32::try_from(s) { + return write!(fmt, "{}", f32::from_bits(u)); } - ty::Float(ast::FloatTy::F64) => { - if let Ok(u) = u64::try_from(s) { - return write!(fmt, "{}", f64::from_bits(u)); - } + } + ty::Float(ast::FloatTy::F64) => { + if let Ok(u) = u64::try_from(s) { + return write!(fmt, "{}", f64::from_bits(u)); } - _ => {} } - write!(fmt, "{:x}", s) + _ => {} } - Err(_) => fmt.write_str("{pointer}"), - }, + write!(fmt, "{:x}", s) + } Immediate::Scalar(ScalarMaybeUndef::Undef) => fmt.write_str("{undef}"), Immediate::ScalarPair(..) 
=> fmt.write_str("{wide pointer or tuple}"), } @@ -205,11 +205,6 @@ impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> { pub fn from_int(i: impl Into<i128>, layout: TyLayout<'tcx>) -> Self { Self::from_scalar(Scalar::from_int(i, layout.size), layout) } - - #[inline] - pub fn to_bits(self) -> InterpResult<'tcx, u128> { - self.to_scalar()?.to_bits(self.layout.size) - } } // Use the existing layout if given (but sanity check in debug mode), diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index 50ba3e93895..731dcc6a25f 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -322,16 +322,17 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M ty::Float(_) | ty::Int(_) | ty::Uint(_) => { // NOTE: Keep this in sync with the array optimization for int/float // types below! - let size = value.layout.size; let value = value.to_scalar_or_undef(); if self.ref_tracking_for_consts.is_some() { // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous - try_validation!( - value.to_bits(size), - value, - self.path, - "initialized plain (non-pointer) bytes" - ); + let is_bits = value.not_undef().map_or(false, |v| v.is_bits()); + if !is_bits { + throw_validation_failure!( + value, + self.path, + "initialized plain (non-pointer) bytes" + ) + } } else { // At run-time, for now, we accept *anything* for these types, including // undef. We should fix that, but let's start low. 
diff --git a/src/librustc_mir/transform/const_prop.rs b/src/librustc_mir/transform/const_prop.rs index 8c1b73510df..c196e748df3 100644 --- a/src/librustc_mir/transform/const_prop.rs +++ b/src/librustc_mir/transform/const_prop.rs @@ -545,7 +545,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { let left_ty = left.ty(&self.local_decls, self.tcx); let left_size_bits = self.ecx.layout_of(left_ty).ok()?.size.bits(); let right_size = r.layout.size; - let r_bits = r.to_scalar().and_then(|r| r.to_bits(right_size)); + let r_bits = r.to_scalar().ok(); + // This is basically `force_bits`. + let r_bits = r_bits.and_then(|r| r.to_bits_or_ptr(right_size, &self.tcx).ok()); if r_bits.map_or(false, |b| b >= left_size_bits as u128) { self.report_assert_as_lint( lint::builtin::ARITHMETIC_OVERFLOW, diff --git a/src/librustc_mir_build/hair/pattern/_match.rs b/src/librustc_mir_build/hair/pattern/_match.rs index 90e4f536478..4c7e6e1754a 100644 --- a/src/librustc_mir_build/hair/pattern/_match.rs +++ b/src/librustc_mir_build/hair/pattern/_match.rs @@ -1396,21 +1396,19 @@ impl<'tcx> IntRange<'tcx> { ) -> Option<IntRange<'tcx>> { if let Some((target_size, bias)) = Self::integral_size_and_signed_bias(tcx, value.ty) { let ty = value.ty; - let val = if let ty::ConstKind::Value(ConstValue::Scalar(Scalar::Raw { data, size })) = - value.val - { - // For this specific pattern we can skip a lot of effort and go - // straight to the result, after doing a bit of checking. (We - // could remove this branch and just use the next branch, which - // is more general but much slower.) - Scalar::<()>::check_raw(data, size, target_size); - data - } else if let Some(val) = value.try_eval_bits(tcx, param_env, ty) { - // This is a more general form of the previous branch. 
- val - } else { - return None; - }; + let val = (|| { + if let ty::ConstKind::Value(ConstValue::Scalar(scalar)) = value.val { + // For this specific pattern we can skip a lot of effort and go + // straight to the result, after doing a bit of checking. (We + // could remove this branch and just fall through, which + // is more general but much slower.) + if let Ok(bits) = scalar.to_bits_or_ptr(target_size, &tcx) { + return Some(bits); + } + } + // This is a more general form of the previous case. + value.try_eval_bits(tcx, param_env, ty) + })()?; let val = val ^ bias; Some(IntRange { range: val..=val, ty, span }) } else { |
