 src/librustc/mir/interpret/allocation.rs                 | 78 +-
 src/librustc/mir/interpret/mod.rs                        | 43 +-
 src/librustc/mir/interpret/pointer.rs                    |  4 +-
 src/librustc/mir/interpret/value.rs                      | 73 +-
 src/librustc_mir/interpret/cast.rs                       |  6 +-
 src/librustc_mir/interpret/eval_context.rs               |  4 +-
 src/librustc_mir/interpret/intrinsics.rs                 | 10 +-
 src/librustc_mir/interpret/intrinsics/caller_location.rs |  6 +-
 src/librustc_mir/interpret/memory.rs                     | 30 +-
 src/librustc_mir/interpret/operand.rs                    | 16 +-
 src/librustc_mir/interpret/place.rs                      | 37 +-
 src/librustc_mir/interpret/step.rs                       |  5 +-
 src/librustc_mir/interpret/terminator.rs                 |  4 +-
 src/librustc_mir/interpret/traits.rs                     |  9 +-
 src/librustc_mir/interpret/validity.rs                   | 10 +-
 src/librustc_mir/interpret/visitor.rs                    |  4 +-
 16 files changed, 186 insertions(+), 153 deletions(-)
diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs index 946b6add40a..4d42e796d10 100644 --- a/src/librustc/mir/interpret/allocation.rs +++ b/src/librustc/mir/interpret/allocation.rs @@ -1,18 +1,20 @@ //! The virtual memory representation of the MIR interpreter. +use std::borrow::Cow; +use std::convert::TryFrom; +use std::iter; +use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub}; + +use rustc_ast::ast::Mutability; +use rustc_data_structures::sorted_map::SortedMap; +use rustc_target::abi::HasDataLayout; + use super::{ read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef, }; use crate::ty::layout::{Align, Size}; -use rustc_ast::ast::Mutability; -use rustc_data_structures::sorted_map::SortedMap; -use rustc_target::abi::HasDataLayout; -use std::borrow::Cow; -use std::iter; -use std::ops::{Deref, DerefMut, Range}; - // NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in // `src/librustc_mir/interpret/snapshot.rs`. #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)] @@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> { /// Creates a read-only allocation initialized by the given bytes pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self { let bytes = slice.into().into_owned(); - let size = Size::from_bytes(bytes.len() as u64); + let size = Size::from_bytes(u64::try_from(bytes.len()).unwrap()); Self { bytes, relocations: Relocations::new(), @@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> { } pub fn undef(size: Size, align: Align) -> Self { - assert_eq!(size.bytes() as usize as u64, size.bytes()); Allocation { - bytes: vec![0; size.bytes() as usize], + bytes: vec![0; usize::try_from(size.bytes()).unwrap()], relocations: Relocations::new(), undef_mask: UndefMask::new(size, false), size, @@ -152,7 +153,7 @@ impl Allocation<(), ()> { /// Raw accessors. Provide access to otherwise private bytes. 
impl<Tag, Extra> Allocation<Tag, Extra> { pub fn len(&self) -> usize { - self.size.bytes() as usize + usize::try_from(self.size.bytes()).unwrap() } /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs @@ -182,13 +183,8 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> { /// Returns the range of this allocation that was meant. #[inline] fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> { - let end = offset + size; // This does overflow checking. - assert_eq!( - end.bytes() as usize as u64, - end.bytes(), - "cannot handle this access on this host architecture" - ); - let end = end.bytes() as usize; + let end = Size::add(offset, size); // This does overflow checking. + let end = usize::try_from(end.bytes()).expect("access too big for this host architecture"); assert!( end <= self.len(), "Out-of-bounds access at offset {}, size {} in allocation of size {}", @@ -196,7 +192,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> { size.bytes(), self.len() ); - (offset.bytes() as usize)..end + usize::try_from(offset.bytes()).unwrap()..end } /// The last argument controls whether we error out when there are undefined @@ -294,11 +290,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> { cx: &impl HasDataLayout, ptr: Pointer<Tag>, ) -> InterpResult<'tcx, &[u8]> { - assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes()); - let offset = ptr.offset.bytes() as usize; + let offset = usize::try_from(ptr.offset.bytes()).unwrap(); Ok(match self.bytes[offset..].iter().position(|&c| c == 0) { Some(size) => { - let size_with_null = Size::from_bytes((size + 1) as u64); + let size_with_null = + Size::from_bytes(u64::try_from(size.checked_add(1).unwrap()).unwrap()); // Go through `get_bytes` for checks and AllocationExtra hooks. // We read the null, so we include it in the request, but we want it removed // from the result, so we do subslicing. 
@@ -343,7 +339,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> { let (lower, upper) = src.size_hint(); let len = upper.expect("can only write bounded iterators"); assert_eq!(lower, len, "can only write iterators with a precise length"); - let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?; + let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(u64::try_from(len).unwrap()))?; // `zip` would stop when the first iterator ends; we want to definitely // cover all of `bytes`. for dest in bytes { @@ -386,7 +382,11 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> { } else { match self.relocations.get(&ptr.offset) { Some(&(tag, alloc_id)) => { - let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag); + let ptr = Pointer::new_with_tag( + alloc_id, + Size::from_bytes(u64::try_from(bits).unwrap()), + tag, + ); return Ok(ScalarMaybeUndef::Scalar(ptr.into())); } None => {} @@ -433,7 +433,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> { }; let bytes = match val.to_bits_or_ptr(type_size, cx) { - Err(val) => val.offset.bytes() as u128, + Err(val) => u128::from(val.offset.bytes()), Ok(data) => data, }; @@ -479,7 +479,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> { // We have to go back `pointer_size - 1` bytes, as that one would still overlap with // the beginning of this range. let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1); - let end = ptr.offset + size; // This does overflow checking. + let end = Size::add(ptr.offset, size); // This does overflow checking. self.relocations.range(Size::from_bytes(start)..end) } @@ -524,7 +524,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> { ) }; let start = ptr.offset; - let end = start + size; + let end = Size::add(start, size); // Mark parts of the outermost relocations as undefined if they partially fall outside the // given range. 
@@ -563,7 +563,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> { #[inline] fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> { self.undef_mask - .is_range_defined(ptr.offset, ptr.offset + size) + .is_range_defined(ptr.offset, Size::add(ptr.offset, size)) .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx))))) } @@ -571,7 +571,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> { if size.bytes() == 0 { return; } - self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state); + self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state); } } @@ -616,7 +616,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> { for i in 1..size.bytes() { // FIXME: optimize to bitshift the current undef block's bits and read the top bit. - if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur { + if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur { cur_len += 1; } else { ranges.push(cur_len); @@ -643,7 +643,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> { if defined.ranges.len() <= 1 { self.undef_mask.set_range_inbounds( dest.offset, - dest.offset + size * repeat, + Size::add(dest.offset, Size::mul(size, repeat)), defined.initial, ); return; @@ -721,10 +721,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> { for i in 0..length { new_relocations.extend(relocations.iter().map(|&(offset, reloc)| { // compute offset for current repetition - let dest_offset = dest.offset + (i * size); + let dest_offset = Size::add(dest.offset, Size::mul(size, i)); ( // shift offsets from source allocation to destination allocation - offset + dest_offset - src.offset, + Size::sub(Size::add(offset, dest_offset), src.offset), reloc, ) })); @@ -861,18 +861,18 @@ impl UndefMask { if amount.bytes() == 0 { return; } - let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes(); + let unused_trailing_bits = + u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - 
self.len.bytes(); if amount.bytes() > unused_trailing_bits { let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1; - assert_eq!(additional_blocks as usize as u64, additional_blocks); self.blocks.extend( // FIXME(oli-obk): optimize this by repeating `new_state as Block`. - iter::repeat(0).take(additional_blocks as usize), + iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()), ); } let start = self.len; self.len += amount; - self.set_range_inbounds(start, start + amount, new_state); + self.set_range_inbounds(start, Size::add(start, amount), new_state); } } @@ -881,7 +881,5 @@ fn bit_index(bits: Size) -> (usize, usize) { let bits = bits.bytes(); let a = bits / UndefMask::BLOCK_SIZE; let b = bits % UndefMask::BLOCK_SIZE; - assert_eq!(a as usize as u64, a); - assert_eq!(b as usize as u64, b); - (a as usize, b as usize) + (usize::try_from(a).unwrap(), usize::try_from(b).unwrap()) } diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs index 1b5fb4c9954..10c3a06da08 100644 --- a/src/librustc/mir/interpret/mod.rs +++ b/src/librustc/mir/interpret/mod.rs @@ -95,6 +95,27 @@ mod pointer; mod queries; mod value; +use std::convert::TryFrom; +use std::fmt; +use std::io; +use std::num::NonZeroU32; +use std::sync::atomic::{AtomicU32, Ordering}; + +use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt}; +use rustc_ast::ast::LitKind; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::{HashMapExt, Lock}; +use rustc_data_structures::tiny_list::TinyList; +use rustc_hir::def_id::DefId; +use rustc_macros::HashStable; +use rustc_serialize::{Decodable, Encodable, Encoder}; + +use crate::mir; +use crate::ty::codec::TyDecoder; +use crate::ty::layout::{self, Size}; +use crate::ty::subst::GenericArgKind; +use crate::ty::{self, Instance, Ty, TyCtxt}; + pub use self::error::{ struct_error, ConstEvalErr, ConstEvalRawResult, ConstEvalResult, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, 
InterpResult, InvalidProgramInfo, MachineStopType, @@ -107,24 +128,6 @@ pub use self::allocation::{Allocation, AllocationExtra, Relocations, UndefMask}; pub use self::pointer::{CheckInAllocMsg, Pointer, PointerArithmetic}; -use crate::mir; -use crate::ty::codec::TyDecoder; -use crate::ty::layout::{self, Size}; -use crate::ty::subst::GenericArgKind; -use crate::ty::{self, Instance, Ty, TyCtxt}; -use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt}; -use rustc_ast::ast::LitKind; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::sync::{HashMapExt, Lock}; -use rustc_data_structures::tiny_list::TinyList; -use rustc_hir::def_id::DefId; -use rustc_macros::HashStable; -use rustc_serialize::{Decodable, Encodable, Encoder}; -use std::fmt; -use std::io; -use std::num::NonZeroU32; -use std::sync::atomic::{AtomicU32, Ordering}; - /// Uniquely identifies one of the following: /// - A constant /// - A static @@ -264,8 +267,8 @@ impl<'s> AllocDecodingSession<'s> { D: TyDecoder<'tcx>, { // Read the index of the allocation. - let idx = decoder.read_u32()? as usize; - let pos = self.state.data_offsets[idx] as usize; + let idx = usize::try_from(decoder.read_u32()?).unwrap(); + let pos = usize::try_from(self.state.data_offsets[idx]).unwrap(); // Decode the `AllocDiscriminant` now so that we know if we have to reserve an // `AllocId`. diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs index 7d862d43bba..ff479aee4e0 100644 --- a/src/librustc/mir/interpret/pointer.rs +++ b/src/librustc/mir/interpret/pointer.rs @@ -62,9 +62,9 @@ pub trait PointerArithmetic: layout::HasDataLayout { /// This should be called by all the other methods before returning! 
#[inline] fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) { - let val = val as u128; + let val = u128::from(val); let max_ptr_plus_1 = 1u128 << self.pointer_size().bits(); - ((val % max_ptr_plus_1) as u64, over || val >= max_ptr_plus_1) + (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1) } #[inline] diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index 59e6b1b0c37..4474fcd1918 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rustc_apfloat::{ ieee::{Double, Single}, Float, @@ -156,7 +158,7 @@ impl Scalar<()> { #[inline(always)] fn check_data(data: u128, size: u8) { debug_assert_eq!( - truncate(data, Size::from_bytes(size as u64)), + truncate(data, Size::from_bytes(u64::from(size))), data, "Scalar value {:#x} exceeds size of {} bytes", data, @@ -203,8 +205,11 @@ impl<'tcx, Tag> Scalar<Tag> { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size.bytes()); - Ok(Scalar::Raw { data: dl.offset(data as u64, i.bytes())? 
as u128, size }) + assert_eq!(u64::from(size), dl.pointer_size.bytes()); + Ok(Scalar::Raw { + data: u128::from(dl.offset(u64::try_from(data).unwrap(), i.bytes())?), + size, + }) } Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr), } @@ -215,8 +220,13 @@ impl<'tcx, Tag> Scalar<Tag> { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size.bytes()); - Scalar::Raw { data: dl.overflowing_offset(data as u64, i.bytes()).0 as u128, size } + assert_eq!(u64::from(size), dl.pointer_size.bytes()); + Scalar::Raw { + data: u128::from( + dl.overflowing_offset(u64::try_from(data).unwrap(), i.bytes()).0, + ), + size, + } } Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_offset(i, dl)), } @@ -227,8 +237,11 @@ impl<'tcx, Tag> Scalar<Tag> { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size().bytes()); - Ok(Scalar::Raw { data: dl.signed_offset(data as u64, i)? as u128, size }) + assert_eq!(u64::from(size), dl.pointer_size.bytes()); + Ok(Scalar::Raw { + data: u128::from(dl.signed_offset(u64::try_from(data).unwrap(), i)?), + size, + }) } Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr), } @@ -239,9 +252,11 @@ impl<'tcx, Tag> Scalar<Tag> { let dl = cx.data_layout(); match self { Scalar::Raw { data, size } => { - assert_eq!(size as u64, dl.pointer_size.bytes()); + assert_eq!(u64::from(size), dl.pointer_size.bytes()); Scalar::Raw { - data: dl.overflowing_signed_offset(data as u64, i128::from(i)).0 as u128, + data: u128::from( + dl.overflowing_signed_offset(u64::try_from(data).unwrap(), i128::from(i)).0, + ), size, } } @@ -281,25 +296,25 @@ impl<'tcx, Tag> Scalar<Tag> { #[inline] pub fn from_u8(i: u8) -> Self { // Guaranteed to be truncated and does not need sign extension. 
- Scalar::Raw { data: i as u128, size: 1 } + Scalar::Raw { data: i.into(), size: 1 } } #[inline] pub fn from_u16(i: u16) -> Self { // Guaranteed to be truncated and does not need sign extension. - Scalar::Raw { data: i as u128, size: 2 } + Scalar::Raw { data: i.into(), size: 2 } } #[inline] pub fn from_u32(i: u32) -> Self { // Guaranteed to be truncated and does not need sign extension. - Scalar::Raw { data: i as u128, size: 4 } + Scalar::Raw { data: i.into(), size: 4 } } #[inline] pub fn from_u64(i: u64) -> Self { // Guaranteed to be truncated and does not need sign extension. - Scalar::Raw { data: i as u128, size: 8 } + Scalar::Raw { data: i.into(), size: 8 } } #[inline] @@ -376,7 +391,7 @@ impl<'tcx, Tag> Scalar<Tag> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); match self { Scalar::Raw { data, size } => { - assert_eq!(target_size.bytes(), size as u64); + assert_eq!(target_size.bytes(), u64::from(size)); Scalar::check_data(data, size); Ok(data) } @@ -394,7 +409,7 @@ impl<'tcx, Tag> Scalar<Tag> { assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST"); match self { Scalar::Raw { data, size } => { - assert_eq!(target_size.bytes(), size as u64); + assert_eq!(target_size.bytes(), u64::from(size)); Scalar::check_data(data, size); Ok(data) } @@ -458,27 +473,27 @@ impl<'tcx, Tag> Scalar<Tag> { /// Converts the scalar to produce an `u8`. Fails if the scalar is a pointer. pub fn to_u8(self) -> InterpResult<'static, u8> { - self.to_unsigned_with_bit_width(8).map(|v| v as u8) + self.to_unsigned_with_bit_width(8).map(|v| u8::try_from(v).unwrap()) } /// Converts the scalar to produce an `u16`. Fails if the scalar is a pointer. pub fn to_u16(self) -> InterpResult<'static, u16> { - self.to_unsigned_with_bit_width(16).map(|v| v as u16) + self.to_unsigned_with_bit_width(16).map(|v| u16::try_from(v).unwrap()) } /// Converts the scalar to produce an `u32`. Fails if the scalar is a pointer. 
pub fn to_u32(self) -> InterpResult<'static, u32> { - self.to_unsigned_with_bit_width(32).map(|v| v as u32) + self.to_unsigned_with_bit_width(32).map(|v| u32::try_from(v).unwrap()) } /// Converts the scalar to produce an `u64`. Fails if the scalar is a pointer. pub fn to_u64(self) -> InterpResult<'static, u64> { - self.to_unsigned_with_bit_width(64).map(|v| v as u64) + self.to_unsigned_with_bit_width(64).map(|v| u64::try_from(v).unwrap()) } pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> { let b = self.to_bits(cx.data_layout().pointer_size)?; - Ok(b as u64) + Ok(u64::try_from(b).unwrap()) } #[inline] @@ -490,41 +505,41 @@ impl<'tcx, Tag> Scalar<Tag> { /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer. pub fn to_i8(self) -> InterpResult<'static, i8> { - self.to_signed_with_bit_width(8).map(|v| v as i8) + self.to_signed_with_bit_width(8).map(|v| i8::try_from(v).unwrap()) } /// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer. pub fn to_i16(self) -> InterpResult<'static, i16> { - self.to_signed_with_bit_width(16).map(|v| v as i16) + self.to_signed_with_bit_width(16).map(|v| i16::try_from(v).unwrap()) } /// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer. pub fn to_i32(self) -> InterpResult<'static, i32> { - self.to_signed_with_bit_width(32).map(|v| v as i32) + self.to_signed_with_bit_width(32).map(|v| i32::try_from(v).unwrap()) } /// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer. 
pub fn to_i64(self) -> InterpResult<'static, i64> { - self.to_signed_with_bit_width(64).map(|v| v as i64) + self.to_signed_with_bit_width(64).map(|v| i64::try_from(v).unwrap()) } pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> { let sz = cx.data_layout().pointer_size; let b = self.to_bits(sz)?; let b = sign_extend(b, sz) as i128; - Ok(b as i64) + Ok(i64::try_from(b).unwrap()) } #[inline] pub fn to_f32(self) -> InterpResult<'static, Single> { // Going through `u32` to check size and truncation. - Ok(Single::from_bits(self.to_u32()? as u128)) + Ok(Single::from_bits(self.to_u32()?.into())) } #[inline] pub fn to_f64(self) -> InterpResult<'static, Double> { // Going through `u64` to check size and truncation. - Ok(Double::from_bits(self.to_u64()? as u128)) + Ok(Double::from_bits(self.to_u64()?.into())) } } @@ -671,8 +686,8 @@ pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> data.get_bytes( cx, // invent a pointer, only the offset is relevant anyway - Pointer::new(AllocId(0), Size::from_bytes(start as u64)), - Size::from_bytes(len as u64), + Pointer::new(AllocId(0), Size::from_bytes(u64::try_from(start).unwrap())), + Size::from_bytes(u64::try_from(len).unwrap()), ) .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err)) } else { diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs index 5c70b28a567..e2431215583 100644 --- a/src/librustc_mir/interpret/cast.rs +++ b/src/librustc_mir/interpret/cast.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rustc::ty::adjustment::PointerCast; use rustc::ty::layout::{self, Size, TyLayout}; use rustc::ty::{self, Ty, TypeAndMut, TypeFoldable}; @@ -206,8 +208,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { Char => { // `u8` to `char` cast - assert_eq!(v as u8 as u128, v); - Ok(Scalar::from_uint(v, Size::from_bytes(4))) + Ok(Scalar::from_uint(u8::try_from(v).unwrap(), Size::from_bytes(4))) } // Casts to 
bool are not permitted by rustc, no need to handle them here. @@ -227,6 +228,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { match dest_ty.kind { // float -> uint Uint(t) => { + // FIXME: can we make `bit_width` return a type more compatible with `Size::bits`? let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize); let v = f.to_u128(width).value; // This should already fit the bit width diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs index c50146f295a..0d0f4daa85e 100644 --- a/src/librustc_mir/interpret/eval_context.rs +++ b/src/librustc_mir/interpret/eval_context.rs @@ -1,6 +1,7 @@ use std::cell::Cell; use std::fmt::Write; use std::mem; +use std::ops::Add; use rustc::ich::StableHashingContext; use rustc::mir; @@ -413,6 +414,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // and it also rounds up to alignment, which we want to avoid, // as the unsized field's alignment could be smaller. assert!(!layout.ty.is_simd()); + assert!(layout.fields.count() > 0); trace!("DST layout: {:?}", layout); let sized_size = layout.fields.offset(layout.fields.count() - 1); @@ -452,7 +454,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // here. But this is where the add would go.) // Return the sum of sizes and max of aligns. - let size = sized_size + unsized_size; + let size = Size::add(sized_size, unsized_size); // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). 
diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs index 03aedad0d98..13bfb9895cb 100644 --- a/src/librustc_mir/interpret/intrinsics.rs +++ b/src/librustc_mir/interpret/intrinsics.rs @@ -29,11 +29,11 @@ fn numeric_intrinsic<'tcx, Tag>( Primitive::Int(integer, _) => integer.size(), _ => bug!("invalid `{}` argument: {:?}", name, bits), }; - let extra = 128 - size.bits() as u128; + let extra = 128 - u128::from(size.bits()); let bits_out = match name { - sym::ctpop => bits.count_ones() as u128, - sym::ctlz => bits.leading_zeros() as u128 - extra, - sym::cttz => (bits << extra).trailing_zeros() as u128 - extra, + sym::ctpop => u128::from(bits.count_ones()), + sym::ctlz => u128::from(bits.leading_zeros()) - extra, + sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra, sym::bswap => (bits << extra).swap_bytes(), sym::bitreverse => (bits << extra).reverse_bits(), _ => bug!("not a numeric intrinsic: {}", name), @@ -261,7 +261,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let val_bits = self.force_bits(val, layout.size)?; let raw_shift = self.read_scalar(args[1])?.not_undef()?; let raw_shift_bits = self.force_bits(raw_shift, layout.size)?; - let width_bits = layout.size.bits() as u128; + let width_bits = u128::from(layout.size.bits()); let shift_bits = raw_shift_bits % width_bits; let inv_shift_bits = (width_bits - shift_bits) % width_bits; let result_bits = if intrinsic_name == sym::rotate_left { diff --git a/src/librustc_mir/interpret/intrinsics/caller_location.rs b/src/librustc_mir/interpret/intrinsics/caller_location.rs index dc2b0e1b983..01f9cdea0f0 100644 --- a/src/librustc_mir/interpret/intrinsics/caller_location.rs +++ b/src/librustc_mir/interpret/intrinsics/caller_location.rs @@ -1,3 +1,5 @@ +use std::convert::TryFrom; + use rustc::middle::lang_items::PanicLocationLangItem; use rustc::ty::subst::Subst; use rustc_span::{Span, Symbol}; @@ -59,8 +61,8 @@ impl<'mir, 'tcx, M: 
Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo()); ( Symbol::intern(&caller.file.name.to_string()), - caller.line as u32, - caller.col_display as u32 + 1, + u32::try_from(caller.line).unwrap(), + u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(), ) } diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs index 110f2ffd9d7..f51e38eb1ea 100644 --- a/src/librustc_mir/interpret/memory.rs +++ b/src/librustc_mir/interpret/memory.rs @@ -8,6 +8,8 @@ use std::borrow::Cow; use std::collections::VecDeque; +use std::convert::TryFrom; +use std::ops::{Add, Mul}; use std::ptr; use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout}; @@ -346,7 +348,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { }; Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) { Ok(bits) => { - let bits = bits as u64; // it's ptr-sized + let bits = u64::try_from(bits).unwrap(); // it's ptr-sized assert!(size.bytes() == 0); // Must be non-NULL. if bits == 0 { @@ -667,7 +669,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { } if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() { // this `as usize` is fine, since `i` came from a `usize` - let i = i.bytes() as usize; + let i = usize::try_from(i.bytes()).unwrap(); // Checked definedness (and thus range) and relocations. This access also doesn't // influence interpreter execution but is only for debugging. @@ -835,7 +837,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { ) -> InterpResult<'tcx> { let src = src.into_iter(); let size = Size::from_bytes(src.size_hint().0 as u64); - // `write_bytes` checks that this lower bound matches the upper bound matches reality. + // `write_bytes` checks that this lower bound `size` matches the upper bound and reality. let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? 
{ Some(ptr) => ptr, None => return Ok(()), // zero-sized access @@ -874,14 +876,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { let tcx = self.tcx.tcx; - // The bits have to be saved locally before writing to dest in case src and dest overlap. - assert_eq!(size.bytes() as usize as u64, size.bytes()); - // This checks relocation edges on the src. let src_bytes = self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr(); let dest_bytes = - self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; + self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, Size::mul(size, length))?; // If `dest_bytes` is empty we just optimize to not run anything for zsts. // See #67539 @@ -902,7 +901,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { // touched if the bytes stay undef for the whole interpreter execution. On contemporary // operating system this can avoid physically allocating the page. let dest_alloc = self.get_raw_mut(dest.alloc_id)?; - dest_alloc.mark_definedness(dest, size * length, false); + dest_alloc.mark_definedness(dest, Size::mul(size, length), false); dest_alloc.mark_relocation_range(relocations); return Ok(()); } @@ -913,11 +912,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { // The pointers above remain valid even if the `HashMap` table is moved around because they // point into the `Vec` storing the bytes. 
unsafe { - assert_eq!(size.bytes() as usize as u64, size.bytes()); if src.alloc_id == dest.alloc_id { if nonoverlapping { - if (src.offset <= dest.offset && src.offset + size > dest.offset) - || (dest.offset <= src.offset && dest.offset + size > src.offset) + if (src.offset <= dest.offset && Size::add(src.offset, size) > dest.offset) + || (dest.offset <= src.offset && Size::add(dest.offset, size) > src.offset) { throw_ub_format!("copy_nonoverlapping called on overlapping ranges") } @@ -926,16 +924,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { for i in 0..length { ptr::copy( src_bytes, - dest_bytes.offset((size.bytes() * i) as isize), - size.bytes() as usize, + dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()), + usize::try_from(size.bytes()).unwrap(), ); } } else { for i in 0..length { ptr::copy_nonoverlapping( src_bytes, - dest_bytes.offset((size.bytes() * i) as isize), - size.bytes() as usize, + dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()), + usize::try_from(size.bytes()).unwrap(), ); } } @@ -975,7 +973,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { ) -> InterpResult<'tcx, u128> { match scalar.to_bits_or_ptr(size, self) { Ok(bits) => Ok(bits), - Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128), + Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()), } } } diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs index 90fb7eb2bb3..9ab4e198db0 100644 --- a/src/librustc_mir/interpret/operand.rs +++ b/src/librustc_mir/interpret/operand.rs @@ -341,7 +341,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Turn the wide MPlace into a string (must already be dereferenced!) 
pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> { let len = mplace.len(self)?; - let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?; + let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(u64::from(len)))?; let str = ::std::str::from_utf8(bytes) .map_err(|err| err_ub_format!("this string is not valid UTF-8: {}", err))?; Ok(str) @@ -406,7 +406,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.operand_field(base, field.index() as u64)?, + Field(field, _) => self.operand_field(base, u64::try_from(field.index()).unwrap())?, Downcast(_, variant) => self.operand_downcast(base, variant)?, Deref => self.deref_operand(base)?.into(), Subslice { .. } | ConstantIndex { .. } | Index(_) => { @@ -556,11 +556,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // where none should happen. 
let ptr = Pointer::new( self.tcx.alloc_map.lock().create_memory_alloc(data), - Size::from_bytes(start as u64), // offset: `start` + Size::from_bytes(start.try_into().unwrap()), // offset: `start` ); Operand::Immediate(Immediate::new_slice( self.tag_global_base_pointer(ptr).into(), - (end - start) as u64, // len: `end - start` + u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start` self, )) } @@ -581,7 +581,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .layout .ty .discriminant_for_variant(*self.tcx, index) - .map_or(index.as_u32() as u128, |discr| discr.val); + .map_or(u128::from(index.as_u32()), |discr| discr.val); return Ok((discr_val, index)); } layout::Variants::Multiple { @@ -593,7 +593,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; // read raw discriminant value - let discr_op = self.operand_field(rval, discr_index as u64)?; + let discr_op = self.operand_field(rval, u64::try_from(discr_index).unwrap())?; let discr_val = self.read_immediate(discr_op)?; let raw_discr = discr_val.to_scalar_or_undef(); trace!("discr value: {:?}", raw_discr); @@ -657,7 +657,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { if !ptr_valid { throw_ub!(InvalidDiscriminant(raw_discr.erase_tag().into())) } - (dataful_variant.as_u32() as u128, dataful_variant) + (u128::from(dataful_variant.as_u32()), dataful_variant) } Ok(raw_discr) => { // We need to use machine arithmetic to get the relative variant idx: @@ -686,7 +686,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { .expect("tagged layout for non adt") .variants .len(); - assert!((variant_index as usize) < variants_len); + assert!(usize::try_from(variant_index).unwrap() < variants_len); (u128::from(variant_index), VariantIdx::from_u32(variant_index)) } else { (u128::from(dataful_variant.as_u32()), dataful_variant) diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs index 
6cf11c071e4..7bea5357cdf 100644 --- a/src/librustc_mir/interpret/place.rs +++ b/src/librustc_mir/interpret/place.rs @@ -4,6 +4,7 @@ use std::convert::TryFrom; use std::hash::Hash; +use std::ops::Mul; use rustc::mir; use rustc::mir::interpret::truncate; @@ -405,11 +406,11 @@ where // This can only be reached in ConstProp and non-rustc-MIR. throw_ub!(BoundsCheckFailed { len, index: field }); } - stride * field + Size::mul(stride, field) // `Size` multiplication is checked } layout::FieldPlacement::Union(count) => { assert!( - field < count as u64, + field < u64::try_from(count).unwrap(), "Tried to access field {} of union {:#?} with {} fields", field, base.layout, @@ -420,7 +421,7 @@ where } }; // the only way conversion can fail if is this is an array (otherwise we already panicked - // above). In that case, all fields are equal. + // above). In that case, all fields have the same layout. let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?; // Offset may need adjustment for unsized fields. @@ -465,7 +466,7 @@ where }; let layout = base.layout.field(self, 0)?; let dl = &self.tcx.data_layout; - Ok((0..len).map(move |i| base.offset(i * stride, MemPlaceMeta::None, layout, dl))) + Ok((0..len).map(move |i| base.offset(Size::mul(stride, i), MemPlaceMeta::None, layout, dl))) } fn mplace_subslice( @@ -477,11 +478,11 @@ where ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { let len = base.len(self)?; // also asserts that we have a type where this makes sense let actual_to = if from_end { - if from + to > len { + if from.checked_add(to).map_or(true, |to| to > len) { // This can only be reached in ConstProp and non-rustc-MIR. 
- throw_ub!(BoundsCheckFailed { len: len as u64, index: from as u64 + to as u64 }); + throw_ub!(BoundsCheckFailed { len: len, index: from.saturating_add(to) }); } - len - to + len.checked_sub(to).unwrap() } else { to }; @@ -489,12 +490,12 @@ where // Not using layout method because that works with usize, and does not work with slices // (that have count 0 in their layout). let from_offset = match base.layout.fields { - layout::FieldPlacement::Array { stride, .. } => stride * from, + layout::FieldPlacement::Array { stride, .. } => Size::mul(stride, from), // `Size` multiplication is checked _ => bug!("Unexpected layout of index access: {:#?}", base.layout), }; // Compute meta and new layout - let inner_len = actual_to - from; + let inner_len = actual_to.checked_sub(from).unwrap(); let (meta, ty) = match base.layout.ty.kind { // It is not nice to match on the type, but that seems to be the only way to // implement this. @@ -527,7 +528,7 @@ where ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.mplace_field(base, field.index() as u64)?, + Field(field, _) => self.mplace_field(base, u64::try_from(field.index()).unwrap())?, Downcast(_, variant) => self.mplace_downcast(base, variant)?, Deref => self.deref_operand(base.into())?, @@ -541,14 +542,14 @@ where ConstantIndex { offset, min_length, from_end } => { let n = base.len(self)?; - if n < min_length as u64 { + if n < u64::from(min_length) { // This can only be reached in ConstProp and non-rustc-MIR. 
- throw_ub!(BoundsCheckFailed { len: min_length as u64, index: n as u64 }); + throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n.into() }); } let index = if from_end { - assert!(0 < offset && offset - 1 < min_length); - n - u64::from(offset) + assert!(0 < offset && offset <= min_length); + n.checked_sub(u64::from(offset)).unwrap() } else { assert!(offset < min_length); u64::from(offset) @@ -603,7 +604,7 @@ where ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> { use rustc::mir::ProjectionElem::*; Ok(match *proj_elem { - Field(field, _) => self.place_field(base, field.index() as u64)?, + Field(field, _) => self.place_field(base, u64::try_from(field.index()).unwrap())?, Downcast(_, variant) => self.place_downcast(base, variant)?, Deref => self.deref_operand(self.place_to_op(base)?)?.into(), // For the other variants, we have to force an allocation. @@ -1028,7 +1029,7 @@ where kind: MemoryKind<M::MemoryKind>, ) -> MPlaceTy<'tcx, M::PointerTag> { let ptr = self.memory.allocate_bytes(str.as_bytes(), kind); - let meta = Scalar::from_uint(str.len() as u128, self.pointer_size()); + let meta = Scalar::from_uint(u128::try_from(str.len()).unwrap(), self.pointer_size()); let mplace = MemPlace { ptr: ptr.into(), align: Align::from_bytes(1).unwrap(), @@ -1072,7 +1073,7 @@ where let size = discr_layout.value.size(self); let discr_val = truncate(discr_val, size); - let discr_dest = self.place_field(dest, discr_index as u64)?; + let discr_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?; self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?; } layout::Variants::Multiple { @@ -1103,7 +1104,7 @@ where niche_start_val, )?; // Write result. 
- let niche_dest = self.place_field(dest, discr_index as u64)?; + let niche_dest = self.place_field(dest, u64::try_from(discr_index).unwrap())?; self.write_immediate(*discr_val, niche_dest)?; } } diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs index cb11df18378..eb33a8700f3 100644 --- a/src/librustc_mir/interpret/step.rs +++ b/src/librustc_mir/interpret/step.rs @@ -2,6 +2,8 @@ //! //! The main entry point is the `step` method. +use std::convert::TryFrom; + use rustc::mir; use rustc::mir::interpret::{InterpResult, PointerArithmetic, Scalar}; use rustc::ty::layout::LayoutOf; @@ -192,7 +194,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // Ignore zero-sized fields. if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - let field_dest = self.place_field(dest, field_index as u64)?; + let field_dest = + self.place_field(dest, u64::try_from(field_index).unwrap())?; self.copy_op(op, field_dest)?; } } diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs index 6b0bbe4f6e0..9c52af41272 100644 --- a/src/librustc_mir/interpret/terminator.rs +++ b/src/librustc_mir/interpret/terminator.rs @@ -1,4 +1,5 @@ use std::borrow::Cow; +use std::convert::TryFrom; use rustc::ty::layout::{self, LayoutOf, TyLayout}; use rustc::ty::Instance; @@ -29,6 +30,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { trace!("SwitchInt({:?})", *discr); // Branch to the `otherwise` case by default, if no match is found. 
+ assert!(targets.len() > 0); let mut target_block = targets[targets.len() - 1]; for (index, &const_int) in values.iter().enumerate() { @@ -392,7 +394,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { }; // Find and consult vtable let vtable = receiver_place.vtable(); - let drop_fn = self.get_vtable_slot(vtable, idx)?; + let drop_fn = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?; // `*mut receiver_place.layout.ty` is almost the layout that we // want for args[0]: We have to project to field 0 because we want diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs index efbbca53485..10f746e135a 100644 --- a/src/librustc_mir/interpret/traits.rs +++ b/src/librustc_mir/interpret/traits.rs @@ -1,9 +1,11 @@ -use super::{FnVal, InterpCx, Machine, MemoryKind}; +use std::ops::Mul; use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar}; use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size}; use rustc::ty::{self, Instance, Ty, TypeFoldable}; +use super::{FnVal, InterpCx, Machine, MemoryKind}; + impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { /// Creates a dynamic vtable for the given type and vtable origin. This is used only for /// objects. @@ -103,11 +105,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { pub fn get_vtable_slot( &self, vtable: Scalar<M::PointerTag>, - idx: usize, + idx: u64, ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> { let ptr_size = self.pointer_size(); // Skip over the 'drop_ptr', 'size', and 'align' fields. - let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?; + let vtable_slot = + vtable.ptr_offset(Size::mul(ptr_size, idx.checked_add(3).unwrap()), self)?; let vtable_slot = self .memory .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)? 
diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs index 6f9543bf95a..164478362bf 100644 --- a/src/librustc_mir/interpret/validity.rs +++ b/src/librustc_mir/interpret/validity.rs @@ -4,11 +4,12 @@ //! That's useful because it means other passes (e.g. promotion) can rely on `const`s //! to be const-safe. +use std::convert::TryFrom; use std::fmt::Write; -use std::ops::RangeInclusive; +use std::ops::{Mul, RangeInclusive}; use rustc::ty; -use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx}; +use rustc::ty::layout::{self, LayoutOf, Size, TyLayout, VariantIdx}; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_span::symbol::{sym, Symbol}; @@ -747,7 +748,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> // This is the element type size. let layout = self.ecx.layout_of(tys)?; // This is the size in bytes of the whole array. - let size = layout.size * len; + let size = Size::mul(layout.size, len); // Size is not 0, get a pointer. let ptr = self.ecx.force_ptr(mplace.ptr)?; @@ -777,7 +778,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M> // Some byte was undefined, determine which // element that byte belongs to so we can // provide an index. - let i = (ptr.offset.bytes() / layout.size.bytes()) as usize; + let i = usize::try_from(ptr.offset.bytes() / layout.size.bytes()) + .unwrap(); self.path.push(PathElem::ArrayElem(i)); throw_validation_failure!("undefined bytes", self.path) diff --git a/src/librustc_mir/interpret/visitor.rs b/src/librustc_mir/interpret/visitor.rs index 8808fc70cf7..f80ca3d4b87 100644 --- a/src/librustc_mir/interpret/visitor.rs +++ b/src/librustc_mir/interpret/visitor.rs @@ -1,6 +1,8 @@ //! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound //! types until we arrive at the leaves, with custom handling for primitive types. 
+use std::convert::TryFrom; + use rustc::mir::interpret::InterpResult; use rustc::ty; use rustc::ty::layout::{self, TyLayout, VariantIdx}; @@ -206,7 +208,7 @@ macro_rules! make_value_visitor { // errors: Projecting to a field needs access to `ecx`. let fields: Vec<InterpResult<'tcx, Self::V>> = (0..offsets.len()).map(|i| { - v.project_field(self.ecx(), i as u64) + v.project_field(self.ecx(), u64::try_from(i).unwrap()) }) .collect(); self.visit_aggregate(v, fields.into_iter())?; |
