author     Dylan DPC <dylan.dpc@gmail.com>   2020-03-25 19:28:08 +0100
committer  GitHub <noreply@github.com>       2020-03-25 19:28:08 +0100
commit     97f0a9ef8d3a563eeae1966ff5549400783d8e1f (patch)
tree       7a59fa534aabe9993c1f11ba6136d4f550bb2fdf
parent     3c1d9adb3cb3aad4233075fa296fc3c70b42cdb8 (diff)
parent     7400955e941a3958b1560f2cb0b7648535d2f9d0 (diff)
Rollup merge of #70226 - RalfJung:checked, r=oli-obk
use checked casts and arithmetic in Miri engine

This is unfortunately pretty annoying because we have to cast back and forth between `u64` and `usize` more often than should be necessary, and that cast is considered fallible.
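
For illustration, here is a minimal sketch of the general pattern this PR moves toward: replacing silent `as` truncation with fallible conversions that panic if the value does not fit. The helper name below is hypothetical, not an actual Miri function.

```rust
use std::convert::TryFrom;

// Hypothetical helper (not part of this PR): convert an interpreter offset
// (`u64`) into a host index (`usize`), failing loudly instead of silently
// truncating on 32-bit hosts.
fn host_index(offset: u64) -> usize {
    usize::try_from(offset).expect("access too big for this host architecture")
}

fn main() {
    let bytes = vec![0u8; 16];
    // `usize -> u64` is likewise treated as fallible rather than using `as`.
    let size = u64::try_from(bytes.len()).unwrap();
    assert_eq!(host_index(size), bytes.len());
}
```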

For example, should [this](https://doc.rust-lang.org/nightly/nightly-rustc/rustc/mir/interpret/value/enum.ConstValue.html) really be `usize`?

Also, `LayoutDetails` uses `usize` for field indices, but in Miri we use `u64` so that we can also handle array indexing. Maybe methods like `mplace_field` should be suitably generalized to accept both `u64` and `usize`?
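
A simplified sketch of the dispatch the diff below adds (`operand_index` delegating to `operand_field` when the index fits in `usize`); the types here are stand-ins, not the real interpreter API.

```rust
use std::convert::TryFrom;

// Stand-in type: the real interpreter works with `OpTy`/`MPlaceTy` and
// `InterpResult`; this only sketches the split between the two index types.
#[derive(Clone, Copy)]
struct Op;

// Struct/union fields are indexed by `usize`, matching `LayoutDetails`.
fn operand_field(op: Op, _field: usize) -> Result<Op, String> {
    Ok(op)
}

// Array indexing takes `u64`; an index that fits in `usize` can simply be
// treated as a field, anything larger would need a memory-place based path.
fn operand_index(op: Op, index: u64) -> Result<Op, String> {
    match usize::try_from(index) {
        Ok(field) => operand_field(op, field),
        Err(_) => Err(format!("index {} does not fit in usize on this host", index)),
    }
}

fn main() {
    assert!(operand_index(Op, 3).is_ok());
}
```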

r? @oli-obk Cc @eddyb
-rw-r--r--   src/librustc/mir/interpret/allocation.rs                  65
-rw-r--r--   src/librustc/mir/interpret/mod.rs                         43
-rw-r--r--   src/librustc/mir/interpret/pointer.rs                     15
-rw-r--r--   src/librustc/mir/interpret/value.rs                      106
-rw-r--r--   src/librustc/ty/print/pretty.rs                            4
-rw-r--r--   src/librustc_ast/ast.rs                                    6
-rw-r--r--   src/librustc_codegen_llvm/intrinsic.rs                    34
-rw-r--r--   src/librustc_codegen_ssa/mir/operand.rs                    2
-rw-r--r--   src/librustc_mir/const_eval/mod.rs                        11
-rw-r--r--   src/librustc_mir/interpret/cast.rs                        21
-rw-r--r--   src/librustc_mir/interpret/eval_context.rs                 3
-rw-r--r--   src/librustc_mir/interpret/intrinsics.rs                  16
-rw-r--r--   src/librustc_mir/interpret/intrinsics/caller_location.rs   6
-rw-r--r--   src/librustc_mir/interpret/memory.rs                      31
-rw-r--r--   src/librustc_mir/interpret/operand.rs                     36
-rw-r--r--   src/librustc_mir/interpret/operator.rs                    25
-rw-r--r--   src/librustc_mir/interpret/place.rs                      118
-rw-r--r--   src/librustc_mir/interpret/step.rs                         2
-rw-r--r--   src/librustc_mir/interpret/terminator.rs                   8
-rw-r--r--   src/librustc_mir/interpret/traits.rs                      14
-rw-r--r--   src/librustc_mir/interpret/validity.rs                     6
-rw-r--r--   src/librustc_mir/interpret/visitor.rs                     17
-rw-r--r--   src/librustc_mir_build/hair/pattern/_match.rs              6
-rw-r--r--   src/librustc_target/abi/mod.rs                            22
24 files changed, 337 insertions, 280 deletions
diff --git a/src/librustc/mir/interpret/allocation.rs b/src/librustc/mir/interpret/allocation.rs
index 946b6add40a..26b9e1be2f5 100644
--- a/src/librustc/mir/interpret/allocation.rs
+++ b/src/librustc/mir/interpret/allocation.rs
@@ -1,18 +1,20 @@
 //! The virtual memory representation of the MIR interpreter.
 
+use std::borrow::Cow;
+use std::convert::TryFrom;
+use std::iter;
+use std::ops::{Deref, DerefMut, Range};
+
+use rustc_ast::ast::Mutability;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_target::abi::HasDataLayout;
+
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpResult, Pointer, Scalar, ScalarMaybeUndef,
 };
 
 use crate::ty::layout::{Align, Size};
 
-use rustc_ast::ast::Mutability;
-use rustc_data_structures::sorted_map::SortedMap;
-use rustc_target::abi::HasDataLayout;
-use std::borrow::Cow;
-use std::iter;
-use std::ops::{Deref, DerefMut, Range};
-
 // NOTE: When adding new fields, make sure to adjust the `Snapshot` impl in
 // `src/librustc_mir/interpret/snapshot.rs`.
 #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, RustcEncodable, RustcDecodable)]
@@ -90,7 +92,7 @@ impl<Tag> Allocation<Tag> {
     /// Creates a read-only allocation initialized by the given bytes
     pub fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align) -> Self {
         let bytes = slice.into().into_owned();
-        let size = Size::from_bytes(bytes.len() as u64);
+        let size = Size::from_bytes(bytes.len());
         Self {
             bytes,
             relocations: Relocations::new(),
@@ -107,9 +109,8 @@ impl<Tag> Allocation<Tag> {
     }
 
     pub fn undef(size: Size, align: Align) -> Self {
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
         Allocation {
-            bytes: vec![0; size.bytes() as usize],
+            bytes: vec![0; size.bytes_usize()],
             relocations: Relocations::new(),
             undef_mask: UndefMask::new(size, false),
             size,
@@ -152,7 +153,7 @@ impl Allocation<(), ()> {
 /// Raw accessors. Provide access to otherwise private bytes.
 impl<Tag, Extra> Allocation<Tag, Extra> {
     pub fn len(&self) -> usize {
-        self.size.bytes() as usize
+        self.size.bytes_usize()
     }
 
     /// Looks at a slice which may describe undefined bytes or describe a relocation. This differs
@@ -183,12 +184,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
     #[inline]
     fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
         let end = offset + size; // This does overflow checking.
-        assert_eq!(
-            end.bytes() as usize as u64,
-            end.bytes(),
-            "cannot handle this access on this host architecture"
-        );
-        let end = end.bytes() as usize;
+        let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
         assert!(
             end <= self.len(),
             "Out-of-bounds access at offset {}, size {} in allocation of size {}",
@@ -196,7 +192,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
             size.bytes(),
             self.len()
         );
-        (offset.bytes() as usize)..end
+        offset.bytes_usize()..end
     }
 
     /// The last argument controls whether we error out when there are undefined
@@ -294,11 +290,10 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         cx: &impl HasDataLayout,
         ptr: Pointer<Tag>,
     ) -> InterpResult<'tcx, &[u8]> {
-        assert_eq!(ptr.offset.bytes() as usize as u64, ptr.offset.bytes());
-        let offset = ptr.offset.bytes() as usize;
+        let offset = ptr.offset.bytes_usize();
         Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
             Some(size) => {
-                let size_with_null = Size::from_bytes((size + 1) as u64);
+                let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
                 // Go through `get_bytes` for checks and AllocationExtra hooks.
                 // We read the null, so we include it in the request, but we want it removed
                 // from the result, so we do subslicing.
@@ -343,7 +338,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         let (lower, upper) = src.size_hint();
         let len = upper.expect("can only write bounded iterators");
         assert_eq!(lower, len, "can only write iterators with a precise length");
-        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len as u64))?;
+        let bytes = self.get_bytes_mut(cx, ptr, Size::from_bytes(len))?;
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -386,7 +381,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         } else {
             match self.relocations.get(&ptr.offset) {
                 Some(&(tag, alloc_id)) => {
-                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits as u64), tag);
+                    let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag);
                     return Ok(ScalarMaybeUndef::Scalar(ptr.into()));
                 }
                 None => {}
@@ -433,7 +428,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
         };
 
         let bytes = match val.to_bits_or_ptr(type_size, cx) {
-            Err(val) => val.offset.bytes() as u128,
+            Err(val) => u128::from(val.offset.bytes()),
             Ok(data) => data,
         };
 
@@ -524,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
             )
         };
         let start = ptr.offset;
-        let end = start + size;
+        let end = start + size; // `Size` addition
 
         // Mark parts of the outermost relocations as undefined if they partially fall outside the
         // given range.
@@ -563,7 +558,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
     #[inline]
     fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
         self.undef_mask
-            .is_range_defined(ptr.offset, ptr.offset + size)
+            .is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
             .or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
     }
 
@@ -643,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
         if defined.ranges.len() <= 1 {
             self.undef_mask.set_range_inbounds(
                 dest.offset,
-                dest.offset + size * repeat,
+                dest.offset + size * repeat, // `Size` operations
                 defined.initial,
             );
             return;
@@ -721,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
         for i in 0..length {
             new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
                 // compute offset for current repetition
-                let dest_offset = dest.offset + (i * size);
+                let dest_offset = dest.offset + size * i; // `Size` operations
                 (
                     // shift offsets from source allocation to destination allocation
-                    offset + dest_offset - src.offset,
+                    (offset + dest_offset) - src.offset, // `Size` operations
                     reloc,
                 )
             }));
@@ -861,18 +856,18 @@ impl UndefMask {
         if amount.bytes() == 0 {
             return;
         }
-        let unused_trailing_bits = self.blocks.len() as u64 * Self::BLOCK_SIZE - self.len.bytes();
+        let unused_trailing_bits =
+            u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
         if amount.bytes() > unused_trailing_bits {
             let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
-            assert_eq!(additional_blocks as usize as u64, additional_blocks);
             self.blocks.extend(
                 // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
-                iter::repeat(0).take(additional_blocks as usize),
+                iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
             );
         }
         let start = self.len;
         self.len += amount;
-        self.set_range_inbounds(start, start + amount, new_state);
+        self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
     }
 }
 
@@ -881,7 +876,5 @@ fn bit_index(bits: Size) -> (usize, usize) {
     let bits = bits.bytes();
     let a = bits / UndefMask::BLOCK_SIZE;
     let b = bits % UndefMask::BLOCK_SIZE;
-    assert_eq!(a as usize as u64, a);
-    assert_eq!(b as usize as u64, b);
-    (a as usize, b as usize)
+    (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
 }
diff --git a/src/librustc/mir/interpret/mod.rs b/src/librustc/mir/interpret/mod.rs
index 1b5fb4c9954..10c3a06da08 100644
--- a/src/librustc/mir/interpret/mod.rs
+++ b/src/librustc/mir/interpret/mod.rs
@@ -95,6 +95,27 @@ mod pointer;
 mod queries;
 mod value;
 
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::num::NonZeroU32;
+use std::sync::atomic::{AtomicU32, Ordering};
+
+use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
+use rustc_ast::ast::LitKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::{HashMapExt, Lock};
+use rustc_data_structures::tiny_list::TinyList;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_serialize::{Decodable, Encodable, Encoder};
+
+use crate::mir;
+use crate::ty::codec::TyDecoder;
+use crate::ty::layout::{self, Size};
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Instance, Ty, TyCtxt};
+
 pub use self::error::{
     struct_error, ConstEvalErr, ConstEvalRawResult, ConstEvalResult, ErrorHandled, FrameInfo,
     InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo, MachineStopType,
@@ -107,24 +128,6 @@ pub use self::allocation::{Allocation, AllocationExtra, Relocations, UndefMask};
 
 pub use self::pointer::{CheckInAllocMsg, Pointer, PointerArithmetic};
 
-use crate::mir;
-use crate::ty::codec::TyDecoder;
-use crate::ty::layout::{self, Size};
-use crate::ty::subst::GenericArgKind;
-use crate::ty::{self, Instance, Ty, TyCtxt};
-use byteorder::{BigEndian, LittleEndian, ReadBytesExt, WriteBytesExt};
-use rustc_ast::ast::LitKind;
-use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sync::{HashMapExt, Lock};
-use rustc_data_structures::tiny_list::TinyList;
-use rustc_hir::def_id::DefId;
-use rustc_macros::HashStable;
-use rustc_serialize::{Decodable, Encodable, Encoder};
-use std::fmt;
-use std::io;
-use std::num::NonZeroU32;
-use std::sync::atomic::{AtomicU32, Ordering};
-
 /// Uniquely identifies one of the following:
 /// - A constant
 /// - A static
@@ -264,8 +267,8 @@ impl<'s> AllocDecodingSession<'s> {
         D: TyDecoder<'tcx>,
     {
         // Read the index of the allocation.
-        let idx = decoder.read_u32()? as usize;
-        let pos = self.state.data_offsets[idx] as usize;
+        let idx = usize::try_from(decoder.read_u32()?).unwrap();
+        let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();
 
         // Decode the `AllocDiscriminant` now so that we know if we have to reserve an
         // `AllocId`.
diff --git a/src/librustc/mir/interpret/pointer.rs b/src/librustc/mir/interpret/pointer.rs
index 7d862d43bba..3f841cfb330 100644
--- a/src/librustc/mir/interpret/pointer.rs
+++ b/src/librustc/mir/interpret/pointer.rs
@@ -62,9 +62,9 @@ pub trait PointerArithmetic: layout::HasDataLayout {
     /// This should be called by all the other methods before returning!
     #[inline]
     fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
-        let val = val as u128;
+        let val = u128::from(val);
         let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
-        ((val % max_ptr_plus_1) as u64, over || val >= max_ptr_plus_1)
+        (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
     }
 
     #[inline]
@@ -73,10 +73,8 @@ pub trait PointerArithmetic: layout::HasDataLayout {
         self.truncate_to_ptr(res)
     }
 
-    // Overflow checking only works properly on the range from -u64 to +u64.
     #[inline]
-    fn overflowing_signed_offset(&self, val: u64, i: i128) -> (u64, bool) {
-        // FIXME: is it possible to over/underflow here?
+    fn overflowing_signed_offset(&self, val: u64, i: i64) -> (u64, bool) {
         if i < 0 {
             // Trickery to ensure that `i64::MIN` works fine: compute `n = -i`.
             // This formula only works for true negative values; it overflows for zero!
@@ -84,6 +82,7 @@ pub trait PointerArithmetic: layout::HasDataLayout {
             let res = val.overflowing_sub(n);
             self.truncate_to_ptr(res)
         } else {
+            // `i >= 0`, so the cast is safe.
             self.overflowing_offset(val, i as u64)
         }
     }
@@ -96,7 +95,7 @@ pub trait PointerArithmetic: layout::HasDataLayout {
 
     #[inline]
     fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
-        let (res, over) = self.overflowing_signed_offset(val, i128::from(i));
+        let (res, over) = self.overflowing_signed_offset(val, i);
         if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
     }
 }
@@ -189,14 +188,14 @@ impl<'tcx, Tag> Pointer<Tag> {
     }
 
     #[inline]
-    pub fn overflowing_signed_offset(self, i: i128, cx: &impl HasDataLayout) -> (Self, bool) {
+    pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
         let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
         (Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over)
     }
 
     #[inline(always)]
     pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
-        self.overflowing_signed_offset(i128::from(i), cx).0
+        self.overflowing_signed_offset(i, cx).0
     }
 
     #[inline(always)]
diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs
index 59e6b1b0c37..706cf1cd09a 100644
--- a/src/librustc/mir/interpret/value.rs
+++ b/src/librustc/mir/interpret/value.rs
@@ -1,9 +1,12 @@
+use std::convert::TryFrom;
+use std::fmt;
+
 use rustc_apfloat::{
     ieee::{Double, Single},
     Float,
 };
 use rustc_macros::HashStable;
-use std::fmt;
+use rustc_target::abi::TargetDataLayout;
 
 use crate::ty::{
     layout::{HasDataLayout, Size},
@@ -156,7 +159,7 @@ impl Scalar<()> {
     #[inline(always)]
     fn check_data(data: u128, size: u8) {
         debug_assert_eq!(
-            truncate(data, Size::from_bytes(size as u64)),
+            truncate(data, Size::from_bytes(u64::from(size))),
             data,
             "Scalar value {:#x} exceeds size of {} bytes",
             data,
@@ -198,55 +201,54 @@ impl<'tcx, Tag> Scalar<Tag> {
         Scalar::Raw { data: 0, size: 0 }
     }
 
-    #[inline]
-    pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
-        let dl = cx.data_layout();
+    #[inline(always)]
+    fn ptr_op(
+        self,
+        dl: &TargetDataLayout,
+        f_int: impl FnOnce(u64) -> InterpResult<'tcx, u64>,
+        f_ptr: impl FnOnce(Pointer<Tag>) -> InterpResult<'tcx, Pointer<Tag>>,
+    ) -> InterpResult<'tcx, Self> {
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
-                Ok(Scalar::Raw { data: dl.offset(data as u64, i.bytes())? as u128, size })
+                assert_eq!(u64::from(size), dl.pointer_size.bytes());
+                Ok(Scalar::Raw { data: u128::from(f_int(u64::try_from(data).unwrap())?), size })
             }
-            Scalar::Ptr(ptr) => ptr.offset(i, dl).map(Scalar::Ptr),
+            Scalar::Ptr(ptr) => Ok(Scalar::Ptr(f_ptr(ptr)?)),
         }
     }
 
     #[inline]
+    pub fn ptr_offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+        let dl = cx.data_layout();
+        self.ptr_op(dl, |int| dl.offset(int, i.bytes()), |ptr| ptr.offset(i, dl))
+    }
+
+    #[inline]
     pub fn ptr_wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
-                Scalar::Raw { data: dl.overflowing_offset(data as u64, i.bytes()).0 as u128, size }
-            }
-            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_offset(i, dl)),
-        }
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_offset(int, i.bytes()).0),
+            |ptr| Ok(ptr.wrapping_offset(i, dl)),
+        )
+        .unwrap()
     }
 
     #[inline]
     pub fn ptr_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size().bytes());
-                Ok(Scalar::Raw { data: dl.signed_offset(data as u64, i)? as u128, size })
-            }
-            Scalar::Ptr(ptr) => ptr.signed_offset(i, dl).map(Scalar::Ptr),
-        }
+        self.ptr_op(dl, |int| dl.signed_offset(int, i), |ptr| ptr.signed_offset(i, dl))
     }
 
     #[inline]
     pub fn ptr_wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
         let dl = cx.data_layout();
-        match self {
-            Scalar::Raw { data, size } => {
-                assert_eq!(size as u64, dl.pointer_size.bytes());
-                Scalar::Raw {
-                    data: dl.overflowing_signed_offset(data as u64, i128::from(i)).0 as u128,
-                    size,
-                }
-            }
-            Scalar::Ptr(ptr) => Scalar::Ptr(ptr.wrapping_signed_offset(i, dl)),
-        }
+        self.ptr_op(
+            dl,
+            |int| Ok(dl.overflowing_signed_offset(int, i).0),
+            |ptr| Ok(ptr.wrapping_signed_offset(i, dl)),
+        )
+        .unwrap()
     }
 
     #[inline]
@@ -281,25 +283,25 @@ impl<'tcx, Tag> Scalar<Tag> {
     #[inline]
     pub fn from_u8(i: u8) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 1 }
+        Scalar::Raw { data: i.into(), size: 1 }
     }
 
     #[inline]
     pub fn from_u16(i: u16) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 2 }
+        Scalar::Raw { data: i.into(), size: 2 }
     }
 
     #[inline]
     pub fn from_u32(i: u32) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 4 }
+        Scalar::Raw { data: i.into(), size: 4 }
     }
 
     #[inline]
     pub fn from_u64(i: u64) -> Self {
         // Guaranteed to be truncated and does not need sign extension.
-        Scalar::Raw { data: i as u128, size: 8 }
+        Scalar::Raw { data: i.into(), size: 8 }
     }
 
     #[inline]
@@ -376,7 +378,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(target_size.bytes(), size as u64);
+                assert_eq!(target_size.bytes(), u64::from(size));
                 Scalar::check_data(data, size);
                 Ok(data)
             }
@@ -394,7 +396,7 @@ impl<'tcx, Tag> Scalar<Tag> {
         assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
         match self {
             Scalar::Raw { data, size } => {
-                assert_eq!(target_size.bytes(), size as u64);
+                assert_eq!(target_size.bytes(), u64::from(size));
                 Scalar::check_data(data, size);
                 Ok(data)
             }
@@ -458,27 +460,27 @@ impl<'tcx, Tag> Scalar<Tag> {
 
     /// Converts the scalar to produce an `u8`. Fails if the scalar is a pointer.
     pub fn to_u8(self) -> InterpResult<'static, u8> {
-        self.to_unsigned_with_bit_width(8).map(|v| v as u8)
+        self.to_unsigned_with_bit_width(8).map(|v| u8::try_from(v).unwrap())
     }
 
     /// Converts the scalar to produce an `u16`. Fails if the scalar is a pointer.
     pub fn to_u16(self) -> InterpResult<'static, u16> {
-        self.to_unsigned_with_bit_width(16).map(|v| v as u16)
+        self.to_unsigned_with_bit_width(16).map(|v| u16::try_from(v).unwrap())
     }
 
     /// Converts the scalar to produce an `u32`. Fails if the scalar is a pointer.
     pub fn to_u32(self) -> InterpResult<'static, u32> {
-        self.to_unsigned_with_bit_width(32).map(|v| v as u32)
+        self.to_unsigned_with_bit_width(32).map(|v| u32::try_from(v).unwrap())
     }
 
     /// Converts the scalar to produce an `u64`. Fails if the scalar is a pointer.
     pub fn to_u64(self) -> InterpResult<'static, u64> {
-        self.to_unsigned_with_bit_width(64).map(|v| v as u64)
+        self.to_unsigned_with_bit_width(64).map(|v| u64::try_from(v).unwrap())
     }
 
     pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'static, u64> {
         let b = self.to_bits(cx.data_layout().pointer_size)?;
-        Ok(b as u64)
+        Ok(u64::try_from(b).unwrap())
     }
 
     #[inline]
@@ -490,41 +492,41 @@ impl<'tcx, Tag> Scalar<Tag> {
 
     /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
     pub fn to_i8(self) -> InterpResult<'static, i8> {
-        self.to_signed_with_bit_width(8).map(|v| v as i8)
+        self.to_signed_with_bit_width(8).map(|v| i8::try_from(v).unwrap())
     }
 
     /// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer.
     pub fn to_i16(self) -> InterpResult<'static, i16> {
-        self.to_signed_with_bit_width(16).map(|v| v as i16)
+        self.to_signed_with_bit_width(16).map(|v| i16::try_from(v).unwrap())
     }
 
     /// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer.
     pub fn to_i32(self) -> InterpResult<'static, i32> {
-        self.to_signed_with_bit_width(32).map(|v| v as i32)
+        self.to_signed_with_bit_width(32).map(|v| i32::try_from(v).unwrap())
     }
 
     /// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer.
     pub fn to_i64(self) -> InterpResult<'static, i64> {
-        self.to_signed_with_bit_width(64).map(|v| v as i64)
+        self.to_signed_with_bit_width(64).map(|v| i64::try_from(v).unwrap())
     }
 
     pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'static, i64> {
         let sz = cx.data_layout().pointer_size;
         let b = self.to_bits(sz)?;
         let b = sign_extend(b, sz) as i128;
-        Ok(b as i64)
+        Ok(i64::try_from(b).unwrap())
     }
 
     #[inline]
     pub fn to_f32(self) -> InterpResult<'static, Single> {
         // Going through `u32` to check size and truncation.
-        Ok(Single::from_bits(self.to_u32()? as u128))
+        Ok(Single::from_bits(self.to_u32()?.into()))
     }
 
     #[inline]
     pub fn to_f64(self) -> InterpResult<'static, Double> {
         // Going through `u64` to check size and truncation.
-        Ok(Double::from_bits(self.to_u64()? as u128))
+        Ok(Double::from_bits(self.to_u64()?.into()))
     }
 }
 
@@ -671,8 +673,8 @@ pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) ->
         data.get_bytes(
             cx,
             // invent a pointer, only the offset is relevant anyway
-            Pointer::new(AllocId(0), Size::from_bytes(start as u64)),
-            Size::from_bytes(len as u64),
+            Pointer::new(AllocId(0), Size::from_bytes(start)),
+            Size::from_bytes(len),
         )
         .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
     } else {
diff --git a/src/librustc/ty/print/pretty.rs b/src/librustc/ty/print/pretty.rs
index 8d5d6247f5c..b1626d95eb3 100644
--- a/src/librustc/ty/print/pretty.rs
+++ b/src/librustc/ty/print/pretty.rs
@@ -981,7 +981,7 @@ pub trait PrettyPrinter<'tcx>:
                     .alloc_map
                     .lock()
                     .unwrap_memory(ptr.alloc_id)
-                    .get_bytes(&self.tcx(), ptr, Size::from_bytes(*data as u64))
+                    .get_bytes(&self.tcx(), ptr, Size::from_bytes(*data))
                     .unwrap();
                 p!(pretty_print_byte_str(byte_str));
             }
@@ -1169,7 +1169,7 @@ pub trait PrettyPrinter<'tcx>:
             (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
                 let n = n.val.try_to_bits(self.tcx().data_layout.pointer_size).unwrap();
                 // cast is ok because we already checked for pointer size (32 or 64 bit) above
-                let n = Size::from_bytes(n as u64);
+                let n = Size::from_bytes(n);
                 let ptr = Pointer::new(AllocId(0), offset);
 
                 let byte_str = alloc.get_bytes(&self.tcx(), ptr, n).unwrap();
diff --git a/src/librustc_ast/ast.rs b/src/librustc_ast/ast.rs
index c796a375531..3e7fb0e73bf 100644
--- a/src/librustc_ast/ast.rs
+++ b/src/librustc_ast/ast.rs
@@ -1614,7 +1614,7 @@ impl FloatTy {
         }
     }
 
-    pub fn bit_width(self) -> usize {
+    pub fn bit_width(self) -> u64 {
         match self {
             FloatTy::F32 => 32,
             FloatTy::F64 => 64,
@@ -1663,7 +1663,7 @@ impl IntTy {
         format!("{}{}", val as u128, self.name_str())
     }
 
-    pub fn bit_width(&self) -> Option<usize> {
+    pub fn bit_width(&self) -> Option<u64> {
         Some(match *self {
             IntTy::Isize => return None,
             IntTy::I8 => 8,
@@ -1725,7 +1725,7 @@ impl UintTy {
         format!("{}{}", val, self.name_str())
     }
 
-    pub fn bit_width(&self) -> Option<usize> {
+    pub fn bit_width(&self) -> Option<u64> {
         Some(match *self {
             UintTy::Usize => return None,
             UintTy::U8 => 8,
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index 95982c860f3..bc25b9496d9 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -1172,8 +1172,8 @@ fn generic_simd_intrinsic(
         let m_len = match in_ty.kind {
             // Note that this `.unwrap()` crashes for isize/usize, that's sort
             // of intentional as there's not currently a use case for that.
-            ty::Int(i) => i.bit_width().unwrap() as u64,
-            ty::Uint(i) => i.bit_width().unwrap() as u64,
+            ty::Int(i) => i.bit_width().unwrap(),
+            ty::Uint(i) => i.bit_width().unwrap(),
             _ => return_error!("`{}` is not an integral type", in_ty),
         };
         require_simd!(arg_tys[1], "argument");
@@ -1354,20 +1354,18 @@ fn generic_simd_intrinsic(
         // trailing bits.
         let expected_int_bits = in_len.max(8);
         match ret_ty.kind {
-            ty::Uint(i) if i.bit_width() == Some(expected_int_bits as usize) => (),
+            ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => (),
             _ => return_error!("bitmask `{}`, expected `u{}`", ret_ty, expected_int_bits),
         }
 
         // Integer vector <i{in_bitwidth} x in_len>:
         let (i_xn, in_elem_bitwidth) = match in_elem.kind {
-            ty::Int(i) => (
-                args[0].immediate(),
-                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _),
-            ),
-            ty::Uint(i) => (
-                args[0].immediate(),
-                i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits() as _),
-            ),
+            ty::Int(i) => {
+                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
+            }
+            ty::Uint(i) => {
+                (args[0].immediate(), i.bit_width().unwrap_or(bx.data_layout().pointer_size.bits()))
+            }
             _ => return_error!(
                 "vector argument `{}`'s element type `{}`, expected integer element type",
                 in_ty,
@@ -1378,16 +1376,16 @@ fn generic_simd_intrinsic(
         // Shift the MSB to the right by "in_elem_bitwidth - 1" into the first bit position.
         let shift_indices =
             vec![
-                bx.cx.const_int(bx.type_ix(in_elem_bitwidth as _), (in_elem_bitwidth - 1) as _);
+                bx.cx.const_int(bx.type_ix(in_elem_bitwidth), (in_elem_bitwidth - 1) as _);
                 in_len as _
             ];
         let i_xn_msb = bx.lshr(i_xn, bx.const_vector(shift_indices.as_slice()));
         // Truncate vector to an <i1 x N>
-        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len as _));
+        let i1xn = bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len));
         // Bitcast <i1 x N> to iN:
-        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len as _));
+        let i_ = bx.bitcast(i1xn, bx.type_ix(in_len));
         // Zero-extend iN to the bitmask type:
-        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits as _)));
+        return Ok(bx.zext(i_, bx.type_ix(expected_int_bits)));
     }
 
     fn simd_simple_float_intrinsic(
@@ -2099,7 +2097,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo
     match ty.kind {
         ty::Int(t) => Some((
             match t {
-                ast::IntTy::Isize => cx.tcx.sess.target.ptr_width as u64,
+                ast::IntTy::Isize => u64::from(cx.tcx.sess.target.ptr_width),
                 ast::IntTy::I8 => 8,
                 ast::IntTy::I16 => 16,
                 ast::IntTy::I32 => 32,
@@ -2110,7 +2108,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo
         )),
         ty::Uint(t) => Some((
             match t {
-                ast::UintTy::Usize => cx.tcx.sess.target.ptr_width as u64,
+                ast::UintTy::Usize => u64::from(cx.tcx.sess.target.ptr_width),
                 ast::UintTy::U8 => 8,
                 ast::UintTy::U16 => 16,
                 ast::UintTy::U32 => 32,
@@ -2127,7 +2125,7 @@ fn int_type_width_signed(ty: Ty<'_>, cx: &CodegenCx<'_, '_>) -> Option<(u64, boo
 // Returns None if the type is not a float
 fn float_type_width(ty: Ty<'_>) -> Option<u64> {
     match ty.kind {
-        ty::Float(t) => Some(t.bit_width() as u64),
+        ty::Float(t) => Some(t.bit_width()),
         _ => None,
     }
 }
diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs
index 1e1fede2588..5bb30d03d9f 100644
--- a/src/librustc_codegen_ssa/mir/operand.rs
+++ b/src/librustc_codegen_ssa/mir/operand.rs
@@ -91,7 +91,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 };
                 let a = Scalar::from(Pointer::new(
                     bx.tcx().alloc_map.lock().create_memory_alloc(data),
-                    Size::from_bytes(start as u64),
+                    Size::from_bytes(start),
                 ));
                 let a_llval = bx.scalar_to_backend(
                     a,
diff --git a/src/librustc_mir/const_eval/mod.rs b/src/librustc_mir/const_eval/mod.rs
index 605091d6c7d..6e7e6f9d345 100644
--- a/src/librustc_mir/const_eval/mod.rs
+++ b/src/librustc_mir/const_eval/mod.rs
@@ -1,5 +1,7 @@
 // Not in interpret to make sure we do not use private implementation details
 
+use std::convert::TryFrom;
+
 use rustc::mir;
 use rustc::ty::layout::VariantIdx;
 use rustc::ty::{self, TyCtxt};
@@ -37,7 +39,7 @@ pub(crate) fn const_field<'tcx>(
         Some(variant) => ecx.operand_downcast(op, variant).unwrap(),
     };
     // then project
-    let field = ecx.operand_field(down, field.index() as u64).unwrap();
+    let field = ecx.operand_field(down, field.index()).unwrap();
     // and finally move back to the const world, always normalizing because
     // this is not called for statics.
     op_to_const(&ecx, field)
@@ -68,10 +70,11 @@ pub(crate) fn destructure_const<'tcx>(
 
     let variant = ecx.read_discriminant(op).unwrap().1;
 
+    // We go to `usize` as we cannot allocate anything bigger anyway.
     let field_count = match val.ty.kind {
-        ty::Array(_, len) => len.eval_usize(tcx, param_env),
-        ty::Adt(def, _) => def.variants[variant].fields.len() as u64,
-        ty::Tuple(substs) => substs.len() as u64,
+        ty::Array(_, len) => usize::try_from(len.eval_usize(tcx, param_env)).unwrap(),
+        ty::Adt(def, _) => def.variants[variant].fields.len(),
+        ty::Tuple(substs) => substs.len(),
         _ => bug!("cannot destructure constant {:?}", val),
     };
 
diff --git a/src/librustc_mir/interpret/cast.rs b/src/librustc_mir/interpret/cast.rs
index 5c70b28a567..f7327825ca4 100644
--- a/src/librustc_mir/interpret/cast.rs
+++ b/src/librustc_mir/interpret/cast.rs
@@ -1,3 +1,5 @@
+use std::convert::TryFrom;
+
 use rustc::ty::adjustment::PointerCast;
 use rustc::ty::layout::{self, Size, TyLayout};
 use rustc::ty::{self, Ty, TypeAndMut, TypeFoldable};
@@ -206,8 +208,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
 
             Char => {
                 // `u8` to `char` cast
-                assert_eq!(v as u8 as u128, v);
-                Ok(Scalar::from_uint(v, Size::from_bytes(4)))
+                Ok(Scalar::from_uint(u8::try_from(v).unwrap(), Size::from_bytes(4)))
             }
 
             // Casts to bool are not permitted by rustc, no need to handle them here.
@@ -227,16 +228,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match dest_ty.kind {
             // float -> uint
             Uint(t) => {
-                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
-                let v = f.to_u128(width).value;
+                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
+                let v = f.to_u128(usize::try_from(width).unwrap()).value;
                 // This should already fit the bit width
-                Ok(Scalar::from_uint(v, Size::from_bits(width as u64)))
+                Ok(Scalar::from_uint(v, Size::from_bits(width)))
             }
             // float -> int
             Int(t) => {
-                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits() as usize);
-                let v = f.to_i128(width).value;
-                Ok(Scalar::from_int(v, Size::from_bits(width as u64)))
+                let width = t.bit_width().unwrap_or_else(|| self.pointer_size().bits());
+                let v = f.to_i128(usize::try_from(width).unwrap()).value;
+                Ok(Scalar::from_int(v, Size::from_bits(width)))
             }
             // float -> f32
             Float(FloatTy::F32) => Ok(Scalar::from_f32(f.convert(&mut false).value)),
@@ -319,11 +320,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // Example: `Arc<T>` -> `Arc<Trait>`
                 // here we need to increase the size of every &T thin ptr field to a fat ptr
                 for i in 0..src.layout.fields.count() {
-                    let dst_field = self.place_field(dest, i as u64)?;
+                    let dst_field = self.place_field(dest, i)?;
                     if dst_field.layout.is_zst() {
                         continue;
                     }
-                    let src_field = self.operand_field(src, i as u64)?;
+                    let src_field = self.operand_field(src, i)?;
                     if src_field.layout.ty == dst_field.layout.ty {
                         self.copy_op(src_field, dst_field)?;
                     } else {
diff --git a/src/librustc_mir/interpret/eval_context.rs b/src/librustc_mir/interpret/eval_context.rs
index c50146f295a..c2baabf4233 100644
--- a/src/librustc_mir/interpret/eval_context.rs
+++ b/src/librustc_mir/interpret/eval_context.rs
@@ -413,6 +413,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // and it also rounds up to alignment, which we want to avoid,
                 // as the unsized field's alignment could be smaller.
                 assert!(!layout.ty.is_simd());
+                assert!(layout.fields.count() > 0);
                 trace!("DST layout: {:?}", layout);
 
                 let sized_size = layout.fields.offset(layout.fields.count() - 1);
@@ -452,7 +453,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // here. But this is where the add would go.)
 
                 // Return the sum of sizes and max of aligns.
-                let size = sized_size + unsized_size;
+                let size = sized_size + unsized_size; // `Size` addition
 
                 // Choose max of two known alignments (combined value must
                 // be aligned according to more restrictive of the two).
diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs
index 03aedad0d98..e5f89b10e76 100644
--- a/src/librustc_mir/interpret/intrinsics.rs
+++ b/src/librustc_mir/interpret/intrinsics.rs
@@ -29,11 +29,11 @@ fn numeric_intrinsic<'tcx, Tag>(
         Primitive::Int(integer, _) => integer.size(),
         _ => bug!("invalid `{}` argument: {:?}", name, bits),
     };
-    let extra = 128 - size.bits() as u128;
+    let extra = 128 - u128::from(size.bits());
     let bits_out = match name {
-        sym::ctpop => bits.count_ones() as u128,
-        sym::ctlz => bits.leading_zeros() as u128 - extra,
-        sym::cttz => (bits << extra).trailing_zeros() as u128 - extra,
+        sym::ctpop => u128::from(bits.count_ones()),
+        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
+        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
         sym::bswap => (bits << extra).swap_bytes(),
         sym::bitreverse => (bits << extra).reverse_bits(),
         _ => bug!("not a numeric intrinsic: {}", name),
@@ -261,7 +261,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let val_bits = self.force_bits(val, layout.size)?;
                 let raw_shift = self.read_scalar(args[1])?.not_undef()?;
                 let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
-                let width_bits = layout.size.bits() as u128;
+                let width_bits = u128::from(layout.size.bits());
                 let shift_bits = raw_shift_bits % width_bits;
                 let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                 let result_bits = if intrinsic_name == sym::rotate_left {
@@ -350,8 +350,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 );
 
                 for i in 0..len {
-                    let place = self.place_field(dest, i)?;
-                    let value = if i == index { elem } else { self.operand_field(input, i)? };
+                    let place = self.place_index(dest, i)?;
+                    let value = if i == index { elem } else { self.operand_index(input, i)? };
                     self.copy_op(value, place)?;
                 }
             }
@@ -370,7 +370,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     "Return type `{}` must match vector element type `{}`",
                     dest.layout.ty, e_ty
                 );
-                self.copy_op(self.operand_field(args[0], index)?, dest)?;
+                self.copy_op(self.operand_index(args[0], index)?, dest)?;
             }
             _ => return Ok(false),
         }
diff --git a/src/librustc_mir/interpret/intrinsics/caller_location.rs b/src/librustc_mir/interpret/intrinsics/caller_location.rs
index dc2b0e1b983..01f9cdea0f0 100644
--- a/src/librustc_mir/interpret/intrinsics/caller_location.rs
+++ b/src/librustc_mir/interpret/intrinsics/caller_location.rs
@@ -1,3 +1,5 @@
+use std::convert::TryFrom;
+
 use rustc::middle::lang_items::PanicLocationLangItem;
 use rustc::ty::subst::Subst;
 use rustc_span::{Span, Symbol};
@@ -59,8 +61,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
         (
             Symbol::intern(&caller.file.name.to_string()),
-            caller.line as u32,
-            caller.col_display as u32 + 1,
+            u32::try_from(caller.line).unwrap(),
+            u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
         )
     }
 
diff --git a/src/librustc_mir/interpret/memory.rs b/src/librustc_mir/interpret/memory.rs
index 110f2ffd9d7..49b9018fd17 100644
--- a/src/librustc_mir/interpret/memory.rs
+++ b/src/librustc_mir/interpret/memory.rs
@@ -8,6 +8,7 @@
 
 use std::borrow::Cow;
 use std::collections::VecDeque;
+use std::convert::TryFrom;
 use std::ptr;
 
 use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
@@ -346,7 +347,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         };
         Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
             Ok(bits) => {
-                let bits = bits as u64; // it's ptr-sized
+                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
                 assert!(size.bytes() == 0);
                 // Must be non-NULL.
                 if bits == 0 {
@@ -667,7 +668,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             }
             if alloc.undef_mask().is_range_defined(i, i + Size::from_bytes(1)).is_ok() {
                 // this `as usize` is fine, since `i` came from a `usize`
-                let i = i.bytes() as usize;
+                let i = i.bytes_usize();
 
                 // Checked definedness (and thus range) and relocations. This access also doesn't
                 // influence interpreter execution but is only for debugging.
@@ -692,8 +693,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             let mut pos = Size::ZERO;
             let relocation_width = (self.pointer_size().bytes() - 1) * 3;
             for (i, target_id) in relocations {
-                // this `as usize` is fine, since we can't print more chars than `usize::MAX`
-                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes() as usize).unwrap();
+                write!(msg, "{:1$}", "", ((i - pos) * 3).bytes_usize()).unwrap();
                 let target = format!("({})", target_id);
                 // this `as usize` is fine, since we can't print more chars than `usize::MAX`
                 write!(msg, "└{0:─^1$}┘ ", target, relocation_width as usize).unwrap();
@@ -834,8 +834,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         src: impl IntoIterator<Item = u8>,
     ) -> InterpResult<'tcx> {
         let src = src.into_iter();
-        let size = Size::from_bytes(src.size_hint().0 as u64);
-        // `write_bytes` checks that this lower bound matches the upper bound matches reality.
+        let size = Size::from_bytes(src.size_hint().0);
+        // `write_bytes` checks that this lower bound `size` matches the upper bound and reality.
         let ptr = match self.check_ptr_access(ptr, size, Align::from_bytes(1).unwrap())? {
             Some(ptr) => ptr,
             None => return Ok(()), // zero-sized access
@@ -874,14 +874,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 
         let tcx = self.tcx.tcx;
 
-        // The bits have to be saved locally before writing to dest in case src and dest overlap.
-        assert_eq!(size.bytes() as usize as u64, size.bytes());
-
         // This checks relocation edges on the src.
         let src_bytes =
             self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
         let dest_bytes =
-            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?;
+            self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication
 
         // If `dest_bytes` is empty we just optimize to not run anything for zsts.
         // See #67539
@@ -902,7 +899,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             // touched if the bytes stay undef for the whole interpreter execution. On contemporary
             // operating system this can avoid physically allocating the page.
             let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
-            dest_alloc.mark_definedness(dest, size * length, false);
+            dest_alloc.mark_definedness(dest, size * length, false); // `Size` multiplication
             dest_alloc.mark_relocation_range(relocations);
             return Ok(());
         }
@@ -913,9 +910,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // The pointers above remain valid even if the `HashMap` table is moved around because they
         // point into the `Vec` storing the bytes.
         unsafe {
-            assert_eq!(size.bytes() as usize as u64, size.bytes());
             if src.alloc_id == dest.alloc_id {
                 if nonoverlapping {
+                    // `Size` additions
                     if (src.offset <= dest.offset && src.offset + size > dest.offset)
                         || (dest.offset <= src.offset && dest.offset + size > src.offset)
                     {
@@ -926,16 +923,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 for i in 0..length {
                     ptr::copy(
                         src_bytes,
-                        dest_bytes.offset((size.bytes() * i) as isize),
-                        size.bytes() as usize,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
                     );
                 }
             } else {
                 for i in 0..length {
                     ptr::copy_nonoverlapping(
                         src_bytes,
-                        dest_bytes.offset((size.bytes() * i) as isize),
-                        size.bytes() as usize,
+                        dest_bytes.add((size * i).bytes_usize()), // `Size` multiplication
+                        size.bytes_usize(),
                     );
                 }
             }
@@ -975,7 +972,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, u128> {
         match scalar.to_bits_or_ptr(size, self) {
             Ok(bits) => Ok(bits),
-            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)? as u128),
+            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
         }
     }
 }
diff --git a/src/librustc_mir/interpret/operand.rs b/src/librustc_mir/interpret/operand.rs
index 90fb7eb2bb3..409c958ac39 100644
--- a/src/librustc_mir/interpret/operand.rs
+++ b/src/librustc_mir/interpret/operand.rs
@@ -1,7 +1,7 @@
 //! Functions concerning immediate values and operands, and reading from operands.
 //! All high-level functions to read from memory work on operands as sources.
 
-use std::convert::{TryFrom, TryInto};
+use std::convert::TryFrom;
 
 use super::{InterpCx, MPlaceTy, Machine, MemPlace, Place, PlaceTy};
 pub use rustc::mir::interpret::ScalarMaybeUndef;
@@ -341,7 +341,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     // Turn the wide MPlace into a string (must already be dereferenced!)
     pub fn read_str(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
         let len = mplace.len(self)?;
-        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len as u64))?;
+        let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
         let str = ::std::str::from_utf8(bytes)
             .map_err(|err| err_ub_format!("this string is not valid UTF-8: {}", err))?;
         Ok(str)
@@ -351,7 +351,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     pub fn operand_field(
         &self,
         op: OpTy<'tcx, M::PointerTag>,
-        field: u64,
+        field: usize,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
         let base = match op.try_as_mplace(self) {
             Ok(mplace) => {
@@ -362,7 +362,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Err(value) => value,
         };
 
-        let field = field.try_into().unwrap();
         let field_layout = op.layout.field(self, field)?;
         if field_layout.is_zst() {
             let immediate = Scalar::zst().into();
@@ -384,6 +383,21 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
     }
 
+    pub fn operand_index(
+        &self,
+        op: OpTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
+        if let Ok(index) = usize::try_from(index) {
+            // We can just treat this as a field.
+            self.operand_field(op, index)
+        } else {
+            // Indexing into a big array. This must be an mplace.
+            let mplace = op.assert_mem_place(self);
+            Ok(self.mplace_index(mplace, index)?.into())
+        }
+    }
+
     pub fn operand_downcast(
         &self,
         op: OpTy<'tcx, M::PointerTag>,
@@ -406,7 +420,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
         use rustc::mir::ProjectionElem::*;
         Ok(match *proj_elem {
-            Field(field, _) => self.operand_field(base, field.index() as u64)?,
+            Field(field, _) => self.operand_field(base, field.index())?,
             Downcast(_, variant) => self.operand_downcast(base, variant)?,
             Deref => self.deref_operand(base)?.into(),
             Subslice { .. } | ConstantIndex { .. } | Index(_) => {
@@ -556,11 +570,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 // where none should happen.
                 let ptr = Pointer::new(
                     self.tcx.alloc_map.lock().create_memory_alloc(data),
-                    Size::from_bytes(start as u64), // offset: `start`
+                    Size::from_bytes(start), // offset: `start`
                 );
                 Operand::Immediate(Immediate::new_slice(
                     self.tag_global_base_pointer(ptr).into(),
-                    (end - start) as u64, // len: `end - start`
+                    u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                     self,
                 ))
             }
@@ -581,7 +595,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     .layout
                     .ty
                     .discriminant_for_variant(*self.tcx, index)
-                    .map_or(index.as_u32() as u128, |discr| discr.val);
+                    .map_or(u128::from(index.as_u32()), |discr| discr.val);
                 return Ok((discr_val, index));
             }
             layout::Variants::Multiple {
@@ -593,7 +607,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         };
 
         // read raw discriminant value
-        let discr_op = self.operand_field(rval, discr_index as u64)?;
+        let discr_op = self.operand_field(rval, discr_index)?;
         let discr_val = self.read_immediate(discr_op)?;
         let raw_discr = discr_val.to_scalar_or_undef();
         trace!("discr value: {:?}", raw_discr);
@@ -657,7 +671,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         if !ptr_valid {
                             throw_ub!(InvalidDiscriminant(raw_discr.erase_tag().into()))
                         }
-                        (dataful_variant.as_u32() as u128, dataful_variant)
+                        (u128::from(dataful_variant.as_u32()), dataful_variant)
                     }
                     Ok(raw_discr) => {
                         // We need to use machine arithmetic to get the relative variant idx:
@@ -686,7 +700,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                                 .expect("tagged layout for non adt")
                                 .variants
                                 .len();
-                            assert!((variant_index as usize) < variants_len);
+                            assert!(usize::try_from(variant_index).unwrap() < variants_len);
                             (u128::from(variant_index), VariantIdx::from_u32(variant_index))
                         } else {
                             (u128::from(dataful_variant.as_u32()), dataful_variant)
diff --git a/src/librustc_mir/interpret/operator.rs b/src/librustc_mir/interpret/operator.rs
index 76a5aecb9db..cb0aaa4d40d 100644
--- a/src/librustc_mir/interpret/operator.rs
+++ b/src/librustc_mir/interpret/operator.rs
@@ -1,3 +1,5 @@
+use std::convert::TryFrom;
+
 use rustc::mir;
 use rustc::mir::interpret::{InterpResult, Scalar};
 use rustc::ty::{
@@ -130,28 +132,27 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Shift ops can have an RHS with a different numeric type.
         if bin_op == Shl || bin_op == Shr {
             let signed = left_layout.abi.is_signed();
-            let mut oflo = (r as u32 as u128) != r;
-            let mut r = r as u32;
-            let size = left_layout.size;
-            oflo |= r >= size.bits() as u32;
-            r %= size.bits() as u32;
+            let size = u128::from(left_layout.size.bits());
+            let overflow = r >= size;
+            let r = r % size; // mask to type size
+            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
             let result = if signed {
                 let l = self.sign_extend(l, left_layout) as i128;
                 let result = match bin_op {
-                    Shl => l << r,
-                    Shr => l >> r,
+                    Shl => l.checked_shl(r).unwrap(),
+                    Shr => l.checked_shr(r).unwrap(),
                     _ => bug!("it has already been checked that this is a shift op"),
                 };
                 result as u128
             } else {
                 match bin_op {
-                    Shl => l << r,
-                    Shr => l >> r,
+                    Shl => l.checked_shl(r).unwrap(),
+                    Shr => l.checked_shr(r).unwrap(),
                     _ => bug!("it has already been checked that this is a shift op"),
                 }
             };
             let truncated = self.truncate(result, left_layout);
-            return Ok((Scalar::from_uint(truncated, size), oflo, left_layout.ty));
+            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
         }
 
         // For the remaining ops, the types must be the same on both sides
@@ -193,7 +194,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 _ => None,
             };
             if let Some(op) = op {
-                let l128 = self.sign_extend(l, left_layout) as i128;
                 let r = self.sign_extend(r, right_layout) as i128;
                 // We need a special check for overflowing remainder:
                 // "int_min % -1" overflows and returns 0, but after casting things to a larger int
@@ -206,8 +206,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     }
                     _ => {}
                 }
+                let l = self.sign_extend(l, left_layout) as i128;
 
-                let (result, oflo) = op(l128, r);
+                let (result, oflo) = op(l, r);
                 // This may be out-of-bounds for the result type, so we have to truncate ourselves.
                 // If that truncation loses any information, we have an overflow.
                 let result = result as u128;
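
For `Shl`/`Shr`, the rewritten code first records whether the right-hand side exceeds the type width, masks it down to that width, and only then converts it to `u32` and uses the checked shift methods, so the `unwrap` can never fire. A rough sketch with plain integers standing in for the interpreter's `Scalar` values and layouts:

```rust
use std::convert::TryFrom;

fn main() {
    let bits: u128 = 64; // width of the left-hand operand's type
    let l: u128 = 1;
    let r: u128 = 200; // shift amount, possibly of a different (wider) type

    // Detect overflow up front, then mask the shift amount to the type width;
    // after masking, the amount always fits in `u32` and is a legal shift.
    let overflow = r >= bits;
    let r = u32::try_from(r % bits).unwrap();
    let result = l.checked_shl(r).unwrap();

    assert!(overflow); // shifting by 200 in a 64-bit type overflows
    assert_eq!(result, 1u128 << 8); // 200 % 64 == 8
}
```
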
diff --git a/src/librustc_mir/interpret/place.rs b/src/librustc_mir/interpret/place.rs
index 6cf11c071e4..5cf267c257d 100644
--- a/src/librustc_mir/interpret/place.rs
+++ b/src/librustc_mir/interpret/place.rs
@@ -385,43 +385,20 @@ where
         Ok(place)
     }
 
-    /// Offset a pointer to project to a field. Unlike `place_field`, this is always
-    /// possible without allocating, so it can take `&self`. Also return the field's layout.
+    /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
+    /// always possible without allocating, so it can take `&self`. Also return the field's layout.
     /// This supports both struct and array fields.
+    ///
+    /// This also works for arrays, but then the `usize` index type is restricting.
+    /// For indexing into arrays, use `mplace_index`.
     #[inline(always)]
     pub fn mplace_field(
         &self,
         base: MPlaceTy<'tcx, M::PointerTag>,
-        field: u64,
+        field: usize,
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        // Not using the layout method because we want to compute on u64
-        let offset = match base.layout.fields {
-            layout::FieldPlacement::Arbitrary { ref offsets, .. } => {
-                offsets[usize::try_from(field).unwrap()]
-            }
-            layout::FieldPlacement::Array { stride, .. } => {
-                let len = base.len(self)?;
-                if field >= len {
-                    // This can only be reached in ConstProp and non-rustc-MIR.
-                    throw_ub!(BoundsCheckFailed { len, index: field });
-                }
-                stride * field
-            }
-            layout::FieldPlacement::Union(count) => {
-                assert!(
-                    field < count as u64,
-                    "Tried to access field {} of union {:#?} with {} fields",
-                    field,
-                    base.layout,
-                    count
-                );
-                // Offset is always 0
-                Size::from_bytes(0)
-            }
-        };
-        // the only way conversion can fail if is this is an array (otherwise we already panicked
-        // above). In that case, all fields are equal.
-        let field_layout = base.layout.field(self, usize::try_from(field).unwrap_or(0))?;
+        let offset = base.layout.fields.offset(field);
+        let field_layout = base.layout.field(self, field)?;
 
         // Offset may need adjustment for unsized fields.
         let (meta, offset) = if field_layout.is_unsized() {
@@ -451,6 +428,32 @@ where
         base.offset(offset, meta, field_layout, self)
     }
 
+    /// Index into an array.
+    #[inline(always)]
+    pub fn mplace_index(
+        &self,
+        base: MPlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        // Not using the layout method because we want to compute on u64
+        match base.layout.fields {
+            layout::FieldPlacement::Array { stride, .. } => {
+                let len = base.len(self)?;
+                if index >= len {
+                    // This can only be reached in ConstProp and non-rustc-MIR.
+                    throw_ub!(BoundsCheckFailed { len, index });
+                }
+                let offset = stride * index; // `Size` multiplication
+                // All fields have the same layout.
+                let field_layout = base.layout.field(self, 0)?;
+
+                assert!(!field_layout.is_unsized());
+                base.offset(offset, MemPlaceMeta::None, field_layout, self)
+            }
+            _ => bug!("`mplace_index` called on non-array type {:?}", base.layout.ty),
+        }
+    }
+
     // Iterates over all fields of an array. Much more efficient than doing the
     // same by repeatedly calling `mplace_array`.
     pub(super) fn mplace_array_fields(
@@ -465,7 +468,8 @@ where
         };
         let layout = base.layout.field(self, 0)?;
         let dl = &self.tcx.data_layout;
-        Ok((0..len).map(move |i| base.offset(i * stride, MemPlaceMeta::None, layout, dl)))
+        // `Size` multiplication
+        Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
     }
 
     fn mplace_subslice(
@@ -477,11 +481,11 @@ where
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
         let len = base.len(self)?; // also asserts that we have a type where this makes sense
         let actual_to = if from_end {
-            if from + to > len {
+            if from.checked_add(to).map_or(true, |to| to > len) {
                 // This can only be reached in ConstProp and non-rustc-MIR.
-                throw_ub!(BoundsCheckFailed { len: len as u64, index: from as u64 + to as u64 });
+                throw_ub!(BoundsCheckFailed { len, index: from.saturating_add(to) });
             }
-            len - to
+            len.checked_sub(to).unwrap()
         } else {
             to
         };
@@ -489,12 +493,12 @@ where
         // Not using layout method because that works with usize, and does not work with slices
         // (that have count 0 in their layout).
         let from_offset = match base.layout.fields {
-            layout::FieldPlacement::Array { stride, .. } => stride * from,
+            layout::FieldPlacement::Array { stride, .. } => stride * from, // `Size` multiplication is checked
             _ => bug!("Unexpected layout of index access: {:#?}", base.layout),
         };
 
         // Compute meta and new layout
-        let inner_len = actual_to - from;
+        let inner_len = actual_to.checked_sub(from).unwrap();
         let (meta, ty) = match base.layout.ty.kind {
             // It is not nice to match on the type, but that seems to be the only way to
             // implement this.
@@ -527,7 +531,7 @@ where
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
         use rustc::mir::ProjectionElem::*;
         Ok(match *proj_elem {
-            Field(field, _) => self.mplace_field(base, field.index() as u64)?,
+            Field(field, _) => self.mplace_field(base, field.index())?,
             Downcast(_, variant) => self.mplace_downcast(base, variant)?,
             Deref => self.deref_operand(base.into())?,
 
@@ -535,26 +539,29 @@ where
                 let layout = self.layout_of(self.tcx.types.usize)?;
                 let n = self.access_local(self.frame(), local, Some(layout))?;
                 let n = self.read_scalar(n)?;
-                let n = self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_size)?;
-                self.mplace_field(base, u64::try_from(n).unwrap())?
+                let n = u64::try_from(
+                    self.force_bits(n.not_undef()?, self.tcx.data_layout.pointer_size)?,
+                )
+                .unwrap();
+                self.mplace_index(base, n)?
             }
 
             ConstantIndex { offset, min_length, from_end } => {
                 let n = base.len(self)?;
-                if n < min_length as u64 {
+                if n < u64::from(min_length) {
                     // This can only be reached in ConstProp and non-rustc-MIR.
-                    throw_ub!(BoundsCheckFailed { len: min_length as u64, index: n as u64 });
+                    throw_ub!(BoundsCheckFailed { len: min_length.into(), index: n.into() });
                 }
 
                 let index = if from_end {
-                    assert!(0 < offset && offset - 1 < min_length);
-                    n - u64::from(offset)
+                    assert!(0 < offset && offset <= min_length);
+                    n.checked_sub(u64::from(offset)).unwrap()
                 } else {
                     assert!(offset < min_length);
                     u64::from(offset)
                 };
 
-                self.mplace_field(base, index)?
+                self.mplace_index(base, index)?
             }
 
             Subslice { from, to, from_end } => {
@@ -570,7 +577,7 @@ where
     pub fn place_field(
         &mut self,
         base: PlaceTy<'tcx, M::PointerTag>,
-        field: u64,
+        field: usize,
     ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
         // FIXME: We could try to be smarter and avoid allocation for fields that span the
         // entire place.
@@ -578,6 +585,15 @@ where
         Ok(self.mplace_field(mplace, field)?.into())
     }
 
+    pub fn place_index(
+        &mut self,
+        base: PlaceTy<'tcx, M::PointerTag>,
+        index: u64,
+    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
+        let mplace = self.force_allocation(base)?;
+        Ok(self.mplace_index(mplace, index)?.into())
+    }
+
     pub fn place_downcast(
         &self,
         base: PlaceTy<'tcx, M::PointerTag>,
@@ -603,7 +619,7 @@ where
     ) -> InterpResult<'tcx, PlaceTy<'tcx, M::PointerTag>> {
         use rustc::mir::ProjectionElem::*;
         Ok(match *proj_elem {
-            Field(field, _) => self.place_field(base, field.index() as u64)?,
+            Field(field, _) => self.place_field(base, field.index())?,
             Downcast(_, variant) => self.place_downcast(base, variant)?,
             Deref => self.deref_operand(self.place_to_op(base)?)?.into(),
             // For the other variants, we have to force an allocation.
@@ -723,7 +739,7 @@ where
                 ),
                 Immediate::Scalar(ScalarMaybeUndef::Scalar(Scalar::Raw { size, .. })) => {
                     assert_eq!(
-                        Size::from_bytes(size.into()),
+                        Size::from_bytes(size),
                         dest.layout.size,
                         "Size mismatch when writing bits"
                     )
@@ -1028,7 +1044,7 @@ where
         kind: MemoryKind<M::MemoryKind>,
     ) -> MPlaceTy<'tcx, M::PointerTag> {
         let ptr = self.memory.allocate_bytes(str.as_bytes(), kind);
-        let meta = Scalar::from_uint(str.len() as u128, self.pointer_size());
+        let meta = Scalar::from_uint(u128::try_from(str.len()).unwrap(), self.pointer_size());
         let mplace = MemPlace {
             ptr: ptr.into(),
             align: Align::from_bytes(1).unwrap(),
@@ -1072,7 +1088,7 @@ where
                 let size = discr_layout.value.size(self);
                 let discr_val = truncate(discr_val, size);
 
-                let discr_dest = self.place_field(dest, discr_index as u64)?;
+                let discr_dest = self.place_field(dest, discr_index)?;
                 self.write_scalar(Scalar::from_uint(discr_val, size), discr_dest)?;
             }
             layout::Variants::Multiple {
@@ -1103,7 +1119,7 @@ where
                         niche_start_val,
                     )?;
                     // Write result.
-                    let niche_dest = self.place_field(dest, discr_index as u64)?;
+                    let niche_dest = self.place_field(dest, discr_index)?;
                     self.write_immediate(*discr_val, niche_dest)?;
                 }
             }
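
The `mplace_subslice` change replaces the raw `from + to` and `len - to` arithmetic with checked operations, so bogus indices coming from ConstProp or hand-written MIR are reported as bounds-check failures instead of wrapping. A small model of that check, using a hypothetical free function in place of the real method:

```rust
// Hypothetical stand-in for the bounds logic in `mplace_subslice`:
// `len`, `from` and `to` are element counts, as in the interpreter.
fn subslice_end_from_end(len: u64, from: u64, to: u64) -> Result<u64, String> {
    // `from + to` may overflow `u64`; `map_or(true, ..)` treats the overflow
    // itself as out of bounds rather than wrapping around.
    if from.checked_add(to).map_or(true, |sum| sum > len) {
        return Err(format!("index {} out of bounds for length {}", from.saturating_add(to), len));
    }
    // Cannot underflow: the check above guarantees `to <= len`.
    Ok(len.checked_sub(to).unwrap())
}

fn main() {
    assert_eq!(subslice_end_from_end(10, 2, 3), Ok(7));
    assert!(subslice_end_from_end(10, 4, 7).is_err()); // 4 + 7 > 10
    assert!(subslice_end_from_end(10, u64::max_value(), 1).is_err()); // sum overflows
}
```
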
diff --git a/src/librustc_mir/interpret/step.rs b/src/librustc_mir/interpret/step.rs
index cb11df18378..6ec11d42f52 100644
--- a/src/librustc_mir/interpret/step.rs
+++ b/src/librustc_mir/interpret/step.rs
@@ -192,7 +192,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                     // Ignore zero-sized fields.
                     if !op.layout.is_zst() {
                         let field_index = active_field_index.unwrap_or(i);
-                        let field_dest = self.place_field(dest, field_index as u64)?;
+                        let field_dest = self.place_field(dest, field_index)?;
                         self.copy_op(op, field_dest)?;
                     }
                 }
diff --git a/src/librustc_mir/interpret/terminator.rs b/src/librustc_mir/interpret/terminator.rs
index 6b0bbe4f6e0..5ce5ba31a09 100644
--- a/src/librustc_mir/interpret/terminator.rs
+++ b/src/librustc_mir/interpret/terminator.rs
@@ -1,4 +1,5 @@
 use std::borrow::Cow;
+use std::convert::TryFrom;
 
 use rustc::ty::layout::{self, LayoutOf, TyLayout};
 use rustc::ty::Instance;
@@ -29,6 +30,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 trace!("SwitchInt({:?})", *discr);
 
                 // Branch to the `otherwise` case by default, if no match is found.
+                assert!(targets.len() > 0);
                 let mut target_block = targets[targets.len() - 1];
 
                 for (index, &const_int) in values.iter().enumerate() {
@@ -307,7 +309,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                                     .map(|&a| Ok(a))
                                     .chain(
                                         (0..untuple_arg.layout.fields.count())
-                                            .map(|i| self.operand_field(untuple_arg, i as u64)),
+                                            .map(|i| self.operand_field(untuple_arg, i)),
                                     )
                                     .collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
                                     )?,
@@ -330,7 +332,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                         if Some(local) == body.spread_arg {
                             // Must be a tuple
                             for i in 0..dest.layout.fields.count() {
-                                let dest = self.place_field(dest, i as u64)?;
+                                let dest = self.place_field(dest, i)?;
                                 self.pass_argument(rust_abi, &mut caller_iter, dest)?;
                             }
                         } else {
@@ -392,7 +394,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 };
                 // Find and consult vtable
                 let vtable = receiver_place.vtable();
-                let drop_fn = self.get_vtable_slot(vtable, idx)?;
+                let drop_fn = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
 
                 // `*mut receiver_place.layout.ty` is almost the layout that we
                 // want for args[0]: We have to project to field 0 because we want
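
The new assertion in the `SwitchInt` branch only spells out an invariant of well-formed MIR (the `otherwise` target is always present as the last entry); a toy version of the same lookup, not the interpreter's actual types:

```rust
fn otherwise_target(targets: &[u32]) -> u32 {
    // Asserting non-emptiness up front turns malformed MIR into a clear panic
    // instead of an arithmetic-overflow or out-of-bounds panic further down.
    assert!(!targets.is_empty());
    targets[targets.len() - 1]
}

fn main() {
    assert_eq!(otherwise_target(&[10, 11, 99]), 99);
}
```
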
diff --git a/src/librustc_mir/interpret/traits.rs b/src/librustc_mir/interpret/traits.rs
index efbbca53485..1e63766b85d 100644
--- a/src/librustc_mir/interpret/traits.rs
+++ b/src/librustc_mir/interpret/traits.rs
@@ -1,9 +1,11 @@
-use super::{FnVal, InterpCx, Machine, MemoryKind};
+use std::convert::TryFrom;
 
 use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
 use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size};
 use rustc::ty::{self, Instance, Ty, TypeFoldable};
 
+use super::{FnVal, InterpCx, Machine, MemoryKind};
+
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     /// Creates a dynamic vtable for the given type and vtable origin. This is used only for
     /// objects.
@@ -54,7 +56,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // `get_vtable` in `rust_codegen_llvm/meth.rs`.
         // /////////////////////////////////////////////////////////////////////////////////////////
         let vtable = self.memory.allocate(
-            ptr_size * (3 + methods.len() as u64),
+            ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap(),
             ptr_align,
             MemoryKind::Vtable,
         );
@@ -103,11 +105,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     pub fn get_vtable_slot(
         &self,
         vtable: Scalar<M::PointerTag>,
-        idx: usize,
+        idx: u64,
     ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
         let ptr_size = self.pointer_size();
         // Skip over the 'drop_ptr', 'size', and 'align' fields.
-        let vtable_slot = vtable.ptr_offset(ptr_size * (idx as u64 + 3), self)?;
+        let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
         let vtable_slot = self
             .memory
             .check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
@@ -169,10 +171,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             .expect("cannot be a ZST");
         let alloc = self.memory.get_raw(vtable.alloc_id)?;
         let size = alloc.read_ptr_sized(self, vtable.offset(pointer_size, self)?)?.not_undef()?;
-        let size = self.force_bits(size, pointer_size)? as u64;
+        let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
         let align =
             alloc.read_ptr_sized(self, vtable.offset(pointer_size * 2, self)?)?.not_undef()?;
-        let align = self.force_bits(align, pointer_size)? as u64;
+        let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
 
         if size >= self.tcx.data_layout().obj_size_bound() {
             throw_ub_format!(
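
The vtable arithmetic in `traits.rs` keeps the same layout (three pointer-sized header slots for the drop function, size and align, followed by the methods) but computes offsets and the allocation size with checked operations. A rough model with plain `u64` values standing in for `Size`:

```rust
use std::convert::TryFrom;

// Byte offset of method `idx` in a vtable whose pointers are `ptr_size` bytes.
fn vtable_slot_offset(ptr_size: u64, idx: u64) -> u64 {
    // `idx + 3` skips the drop fn, size and align slots; a checked add means an
    // absurd index overflows loudly instead of wrapping to a small offset.
    ptr_size.checked_mul(idx.checked_add(3).unwrap()).unwrap()
}

fn main() {
    assert_eq!(vtable_slot_offset(8, 0), 24); // first method follows the 3 header slots

    // Total allocation size, mirroring the checked form used in the patch
    // (there the multiplication is a checked `Size * u64`).
    let methods: usize = 2;
    let total = 8u64
        .checked_mul(u64::try_from(methods).unwrap().checked_add(3).unwrap())
        .unwrap();
    assert_eq!(total, 40);
}
```
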
diff --git a/src/librustc_mir/interpret/validity.rs b/src/librustc_mir/interpret/validity.rs
index 6f9543bf95a..a355a227480 100644
--- a/src/librustc_mir/interpret/validity.rs
+++ b/src/librustc_mir/interpret/validity.rs
@@ -4,6 +4,7 @@
 //! That's useful because it means other passes (e.g. promotion) can rely on `const`s
 //! to be const-safe.
 
+use std::convert::TryFrom;
 use std::fmt::Write;
 use std::ops::RangeInclusive;
 
@@ -746,7 +747,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                 }
                 // This is the element type size.
                 let layout = self.ecx.layout_of(tys)?;
-                // This is the size in bytes of the whole array.
+                // This is the size in bytes of the whole array. (This checks for overflow.)
                 let size = layout.size * len;
                 // Size is not 0, get a pointer.
                 let ptr = self.ecx.force_ptr(mplace.ptr)?;
@@ -777,7 +778,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                                 // Some byte was undefined, determine which
                                 // element that byte belongs to so we can
                                 // provide an index.
-                                let i = (ptr.offset.bytes() / layout.size.bytes()) as usize;
+                                let i = usize::try_from(ptr.offset.bytes() / layout.size.bytes())
+                                    .unwrap();
                                 self.path.push(PathElem::ArrayElem(i));
 
                                 throw_validation_failure!("undefined bytes", self.path)
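
The validity change above is the same story on the reporting path: the element index of an undefined byte is derived with `usize::try_from` rather than an `as` cast, and the whole-array size comes from a checked multiplication. A short sketch with plain integers in place of `Size`:

```rust
use std::convert::TryFrom;

fn main() {
    // Which element does a bad byte at `offset` belong to, given the element size?
    let elem_size: u64 = 4;
    let offset: u64 = 10;
    let i = usize::try_from(offset / elem_size).unwrap();
    assert_eq!(i, 2); // byte 10 lies in element 2 (bytes 8..12)

    // Whole-array size with overflow detection instead of a silent wrap.
    let len: u64 = 3;
    let total = elem_size.checked_mul(len).expect("array size overflows u64");
    assert_eq!(total, 12);
}
```
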
diff --git a/src/librustc_mir/interpret/visitor.rs b/src/librustc_mir/interpret/visitor.rs
index 8808fc70cf7..e8a76264064 100644
--- a/src/librustc_mir/interpret/visitor.rs
+++ b/src/librustc_mir/interpret/visitor.rs
@@ -28,7 +28,8 @@ pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
     ) -> InterpResult<'tcx, Self>;
 
     /// Projects to the n-th field.
-    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self>;
+    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: usize)
+    -> InterpResult<'tcx, Self>;
 }
 
 // Operands and memory-places are both values.
@@ -62,7 +63,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::
     }
 
     #[inline(always)]
-    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self> {
+    fn project_field(
+        self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        field: usize,
+    ) -> InterpResult<'tcx, Self> {
         ecx.operand_field(self, field)
     }
 }
@@ -96,7 +101,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for MPlaceTy<'tcx,
     }
 
     #[inline(always)]
-    fn project_field(self, ecx: &InterpCx<'mir, 'tcx, M>, field: u64) -> InterpResult<'tcx, Self> {
+    fn project_field(
+        self,
+        ecx: &InterpCx<'mir, 'tcx, M>,
+        field: usize,
+    ) -> InterpResult<'tcx, Self> {
         ecx.mplace_field(self, field)
     }
 }
@@ -206,7 +215,7 @@ macro_rules! make_value_visitor {
                         // errors: Projecting to a field needs access to `ecx`.
                         let fields: Vec<InterpResult<'tcx, Self::V>> =
                             (0..offsets.len()).map(|i| {
-                                v.project_field(self.ecx(), i as u64)
+                                v.project_field(self.ecx(), i)
                             })
                             .collect();
                         self.visit_aggregate(v, fields.into_iter())?;
diff --git a/src/librustc_mir_build/hair/pattern/_match.rs b/src/librustc_mir_build/hair/pattern/_match.rs
index 89063a4227f..76fcbf326fa 100644
--- a/src/librustc_mir_build/hair/pattern/_match.rs
+++ b/src/librustc_mir_build/hair/pattern/_match.rs
@@ -1920,8 +1920,8 @@ fn slice_pat_covered_by_const<'tcx>(
         }
         (ConstValue::Slice { data, start, end }, ty::Slice(t)) => {
             assert_eq!(*t, tcx.types.u8);
-            let ptr = Pointer::new(AllocId(0), Size::from_bytes(start as u64));
-            data.get_bytes(&tcx, ptr, Size::from_bytes((end - start) as u64)).unwrap()
+            let ptr = Pointer::new(AllocId(0), Size::from_bytes(start));
+            data.get_bytes(&tcx, ptr, Size::from_bytes(end - start)).unwrap()
         }
         // FIXME(oli-obk): create a way to extract fat pointers from ByRef
         (_, ty::Slice(_)) => return Ok(false),
@@ -2375,7 +2375,7 @@ fn specialize_one_pattern<'p, 'tcx>(
                 ty::Slice(t) => {
                     match value.val {
                         ty::ConstKind::Value(ConstValue::Slice { data, start, end }) => {
-                            let offset = Size::from_bytes(start as u64);
+                            let offset = Size::from_bytes(start);
                             let n = (end - start) as u64;
                             (Cow::Borrowed(data), offset, n, t)
                         }
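
The call-site cleanups in `_match.rs` (no more `start as u64`) are possible because `Size::from_bytes` becomes generic over `impl TryInto<u64>` in the `librustc_target` change below. A toy version of that constructor, assuming nothing beyond the standard library:

```rust
use std::convert::TryInto;

// Toy stand-in for the new `Size::from_bytes` signature: any integer that
// converts losslessly into `u64` is accepted; a failed conversion (e.g. a
// negative `i64`) panics instead of being silently cast.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
struct Size {
    raw: u64,
}

impl Size {
    fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        // `.ok().unwrap()` because the error type need not implement `Debug`.
        Size { raw: bytes.try_into().ok().unwrap() }
    }

    fn bytes_usize(self) -> usize {
        self.raw.try_into().unwrap()
    }
}

fn main() {
    // Callers no longer need `len as u64` at every call site.
    assert_eq!(Size::from_bytes(5usize), Size::from_bytes(5u64));
    assert_eq!(Size::from_bytes(16u32).bytes_usize(), 16);
}
```
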
diff --git a/src/librustc_target/abi/mod.rs b/src/librustc_target/abi/mod.rs
index 635fb80b659..74d9817d277 100644
--- a/src/librustc_target/abi/mod.rs
+++ b/src/librustc_target/abi/mod.rs
@@ -3,6 +3,7 @@ pub use Primitive::*;
 
 use crate::spec::Target;
 
+use std::convert::{TryFrom, TryInto};
 use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub};
 
 use rustc_index::vec::{Idx, IndexVec};
@@ -240,17 +241,18 @@ pub struct Size {
 }
 
 impl Size {
-    pub const ZERO: Size = Self::from_bytes(0);
+    pub const ZERO: Size = Size { raw: 0 };
 
     #[inline]
-    pub fn from_bits(bits: u64) -> Size {
+    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
+        let bits = bits.try_into().ok().unwrap();
         // Avoid potential overflow from `bits + 7`.
         Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
     }
 
     #[inline]
-    pub const fn from_bytes(bytes: u64) -> Size {
-        Size { raw: bytes }
+    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
+        Size { raw: bytes.try_into().ok().unwrap() }
     }
 
     #[inline]
@@ -259,6 +261,11 @@ impl Size {
     }
 
     #[inline]
+    pub fn bytes_usize(self) -> usize {
+        self.bytes().try_into().unwrap()
+    }
+
+    #[inline]
     pub fn bits(self) -> u64 {
         self.bytes().checked_mul(8).unwrap_or_else(|| {
             panic!("Size::bits: {} bytes in bits doesn't fit in u64", self.bytes())
@@ -266,6 +273,11 @@ impl Size {
     }
 
     #[inline]
+    pub fn bits_usize(self) -> usize {
+        self.bits().try_into().unwrap()
+    }
+
+    #[inline]
     pub fn align_to(self, align: Align) -> Size {
         let mask = align.bytes() - 1;
         Size::from_bytes((self.bytes() + mask) & !mask)
@@ -665,7 +677,7 @@ impl FieldPlacement {
                 Size::ZERO
             }
             FieldPlacement::Array { stride, count } => {
-                let i = i as u64;
+                let i = u64::try_from(i).unwrap();
                 assert!(i < count);
                 stride * i
             }