author     Ariel Ben-Yehuda <ariel.byd@gmail.com>  2017-12-21 22:31:37 +0200
committer  Ariel Ben-Yehuda <ariel.byd@gmail.com>  2017-12-21 22:31:37 +0200
commit     13f0d454e051510ffad9be14acb89d902570f9ac (patch)
tree       b0162aeeead6add17f4fd4faf3c60aa90c15e647
parent     479b734c65700941b80526a5c078bf2752b29a2e (diff)
Revert "Auto merge of #45225 - eddyb:trans-abi, r=arielb1"
This reverts commit f50fd075c2555d8511ccee8a7fe7aee3f2c45e14, reversing
changes made to 5041b3bb3d953a14f32b15d1e41341c629acae12.
-rw-r--r--  src/liballoc/boxed.rs | 18
-rw-r--r--  src/librustc/lib.rs | 4
-rw-r--r--  src/librustc/lint/context.rs | 11
-rw-r--r--  src/librustc/middle/mem_categorization.rs | 4
-rw-r--r--  src/librustc/ty/context.rs | 8
-rw-r--r--  src/librustc/ty/layout.rs | 2686
-rw-r--r--  src/librustc/ty/maps/mod.rs | 4
-rw-r--r--  src/librustc/ty/mod.rs | 8
-rw-r--r--  src/librustc/ty/util.rs | 46
-rw-r--r--  src/librustc_const_eval/_match.rs | 6
-rw-r--r--  src/librustc_const_eval/eval.rs | 7
-rw-r--r--  src/librustc_const_eval/pattern.rs | 4
-rw-r--r--  src/librustc_lint/types.rs | 14
-rw-r--r--  src/librustc_llvm/ffi.rs | 40
-rw-r--r--  src/librustc_llvm/lib.rs | 9
-rw-r--r--  src/librustc_mir/build/matches/simplify.rs | 21
-rw-r--r--  src/librustc_mir/build/matches/test.rs | 4
-rw-r--r--  src/librustc_mir/hair/cx/mod.rs | 4
-rw-r--r--  src/librustc_mir/transform/deaggregator.rs | 4
-rw-r--r--  src/librustc_mir/transform/inline.rs | 5
-rw-r--r--  src/librustc_mir/transform/type_check.rs | 2
-rw-r--r--  src/librustc_mir/util/elaborate_drops.rs | 2
-rw-r--r--  src/librustc_trans/abi.rs | 813
-rw-r--r--  src/librustc_trans/adt.rs | 497
-rw-r--r--  src/librustc_trans/asm.rs | 28
-rw-r--r--  src/librustc_trans/attributes.rs | 2
-rw-r--r--  src/librustc_trans/base.rs | 272
-rw-r--r--  src/librustc_trans/builder.rs | 132
-rw-r--r--  src/librustc_trans/cabi_aarch64.rs | 18
-rw-r--r--  src/librustc_trans/cabi_arm.rs | 18
-rw-r--r--  src/librustc_trans/cabi_asmjs.rs | 15
-rw-r--r--  src/librustc_trans/cabi_hexagon.rs | 19
-rw-r--r--  src/librustc_trans/cabi_mips.rs | 33
-rw-r--r--  src/librustc_trans/cabi_mips64.rs | 33
-rw-r--r--  src/librustc_trans/cabi_msp430.rs | 19
-rw-r--r--  src/librustc_trans/cabi_nvptx.rs | 19
-rw-r--r--  src/librustc_trans/cabi_nvptx64.rs | 19
-rw-r--r--  src/librustc_trans/cabi_powerpc.rs | 32
-rw-r--r--  src/librustc_trans/cabi_powerpc64.rs | 26
-rw-r--r--  src/librustc_trans/cabi_s390x.rs | 47
-rw-r--r--  src/librustc_trans/cabi_sparc.rs | 35
-rw-r--r--  src/librustc_trans/cabi_sparc64.rs | 22
-rw-r--r--  src/librustc_trans/cabi_x86.rs | 64
-rw-r--r--  src/librustc_trans/cabi_x86_64.rs | 95
-rw-r--r--  src/librustc_trans/cabi_x86_win64.rs | 44
-rw-r--r--  src/librustc_trans/callee.rs | 6
-rw-r--r--  src/librustc_trans/common.rs | 155
-rw-r--r--  src/librustc_trans/consts.rs | 29
-rw-r--r--  src/librustc_trans/context.rs | 75
-rw-r--r--  src/librustc_trans/debuginfo/metadata.rs | 709
-rw-r--r--  src/librustc_trans/debuginfo/mod.rs | 9
-rw-r--r--  src/librustc_trans/debuginfo/utils.rs | 13
-rw-r--r--  src/librustc_trans/glue.rs | 59
-rw-r--r--  src/librustc_trans/intrinsic.rs | 516
-rw-r--r--  src/librustc_trans/lib.rs | 5
-rw-r--r--  src/librustc_trans/machine.rs | 79
-rw-r--r--  src/librustc_trans/meth.rs | 26
-rw-r--r--  src/librustc_trans/mir/analyze.rs | 46
-rw-r--r--  src/librustc_trans/mir/block.rs | 576
-rw-r--r--  src/librustc_trans/mir/constant.rs | 392
-rw-r--r--  src/librustc_trans/mir/lvalue.rs | 522
-rw-r--r--  src/librustc_trans/mir/mod.rs | 223
-rw-r--r--  src/librustc_trans/mir/operand.rs | 377
-rw-r--r--  src/librustc_trans/mir/rvalue.rs | 381
-rw-r--r--  src/librustc_trans/mir/statement.rs | 49
-rw-r--r--  src/librustc_trans/trans_item.rs | 5
-rw-r--r--  src/librustc_trans/tvec.rs | 53
-rw-r--r--  src/librustc_trans/type_.rs | 48
-rw-r--r--  src/librustc_trans/type_of.rs | 623
-rw-r--r--  src/librustc_trans_utils/monomorphize.rs | 11
-rw-r--r--  src/rustllvm/RustWrapper.cpp | 65
-rw-r--r--  src/test/codegen/adjustments.rs | 7
-rw-r--r--  src/test/codegen/consts.rs | 4
-rw-r--r--  src/test/codegen/function-arguments.rs | 25
-rw-r--r--  src/test/codegen/issue-32031.rs | 4
-rw-r--r--  src/test/codegen/link_section.rs | 4
-rw-r--r--  src/test/codegen/match-optimizes-away.rs | 20
-rw-r--r--  src/test/codegen/packed.rs | 5
-rw-r--r--  src/test/codegen/refs.rs | 9
-rw-r--r--  src/test/codegen/slice-init.rs | 12
-rw-r--r--  src/test/run-make/issue-25581/test.c | 13
-rw-r--r--  src/test/run-pass/enum-discrim-manual-sizing.rs | 3
-rw-r--r--  src/test/run-pass/enum-univariant-repr.rs | 13
-rw-r--r--  src/test/run-pass/issue-30276.rs (renamed from src/test/ui/print_type_sizes/uninhabited.rs) | 12
-rw-r--r--  src/test/run-pass/packed-struct-optimized-enum.rs | 25
-rw-r--r--  src/test/ui/issue-26548.rs (renamed from src/test/compile-fail/issue-26548.rs) | 5
-rw-r--r--  src/test/ui/issue-26548.stderr | 9
-rw-r--r--  src/test/ui/print_type_sizes/niche-filling.stdout | 80
-rw-r--r--  src/test/ui/print_type_sizes/nullable.rs (renamed from src/test/ui/print_type_sizes/niche-filling.rs) | 18
-rw-r--r--  src/test/ui/print_type_sizes/nullable.stdout | 24
-rw-r--r--  src/test/ui/print_type_sizes/uninhabited.stdout | 5
-rw-r--r--  src/tools/cargotest/main.rs | 4
-rw-r--r--  src/tools/toolstate.toml | 2
93 files changed, 5603 insertions(+), 4965 deletions(-)
diff --git a/src/liballoc/boxed.rs b/src/liballoc/boxed.rs
index 2226cee6e36..79292d390e5 100644
--- a/src/liballoc/boxed.rs
+++ b/src/liballoc/boxed.rs
@@ -151,7 +151,7 @@ impl<T> Place<T> for IntermediateBox<T> {
 unsafe fn finalize<T>(b: IntermediateBox<T>) -> Box<T> {
     let p = b.ptr as *mut T;
     mem::forget(b);
-    Box::from_raw(p)
+    mem::transmute(p)
 }
 
 fn make_place<T>() -> IntermediateBox<T> {
@@ -300,10 +300,7 @@ impl<T: ?Sized> Box<T> {
                issue = "27730")]
     #[inline]
     pub unsafe fn from_unique(u: Unique<T>) -> Self {
-        #[cfg(stage0)]
-        return mem::transmute(u);
-        #[cfg(not(stage0))]
-        return Box(u);
+        mem::transmute(u)
     }
 
     /// Consumes the `Box`, returning the wrapped raw pointer.
@@ -365,14 +362,7 @@ impl<T: ?Sized> Box<T> {
                issue = "27730")]
     #[inline]
     pub fn into_unique(b: Box<T>) -> Unique<T> {
-        #[cfg(stage0)]
-        return unsafe { mem::transmute(b) };
-        #[cfg(not(stage0))]
-        return {
-            let unique = b.0;
-            mem::forget(b);
-            unique
-        };
+        unsafe { mem::transmute(b) }
     }
 }
 
@@ -637,7 +627,7 @@ impl Box<Any + Send> {
     pub fn downcast<T: Any>(self) -> Result<Box<T>, Box<Any + Send>> {
         <Box<Any>>::downcast(self).map_err(|s| unsafe {
             // reapply the Send marker
-            Box::from_raw(Box::into_raw(s) as *mut (Any + Send))
+            mem::transmute::<Box<Any>, Box<Any + Send>>(s)
         })
     }
 }
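This hunk trades `Box::from_raw` back for `mem::transmute`; both turn a raw pointer into an owning `Box<T>`, but `from_raw` is the documented inverse of `Box::into_raw`. A minimal standalone sketch of the round trip (not part of this patch):

    fn roundtrip(b: Box<u32>) -> Box<u32> {
        // into_raw gives up ownership without freeing the allocation...
        let p: *mut u32 = Box::into_raw(b);
        // ...and from_raw reclaims it; transmute(p) produces the same bits
        // here but bypasses the documented Box invariants.
        unsafe { Box::from_raw(p) }
    }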
diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs
index b59f7480476..5e9019c92c5 100644
--- a/src/librustc/lib.rs
+++ b/src/librustc/lib.rs
@@ -46,13 +46,11 @@
 #![feature(const_fn)]
 #![feature(core_intrinsics)]
 #![feature(drain_filter)]
-#![feature(i128)]
 #![feature(i128_type)]
-#![feature(inclusive_range)]
+#![feature(match_default_bindings)]
 #![feature(inclusive_range_syntax)]
 #![cfg_attr(windows, feature(libc))]
 #![feature(macro_vis_matcher)]
-#![feature(match_default_bindings)]
 #![feature(never_type)]
 #![feature(nonzero)]
 #![feature(quote)]
diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs
index 4496e07b138..601e0316d4a 100644
--- a/src/librustc/lint/context.rs
+++ b/src/librustc/lint/context.rs
@@ -34,8 +34,7 @@ use middle::privacy::AccessLevels;
 use rustc_serialize::{Decoder, Decodable, Encoder, Encodable};
 use session::{config, early_error, Session};
 use traits::Reveal;
-use ty::{self, TyCtxt, Ty};
-use ty::layout::{LayoutError, LayoutOf, TyLayout};
+use ty::{self, TyCtxt};
 use util::nodemap::FxHashMap;
 
 use std::default::Default as StdDefault;
@@ -627,14 +626,6 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> {
     }
 }
 
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a LateContext<'a, 'tcx> {
-    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
-
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
-        (self.tcx, self.param_env.reveal_all()).layout_of(ty)
-    }
-}
-
 impl<'a, 'tcx> hir_visit::Visitor<'tcx> for LateContext<'a, 'tcx> {
     /// Because lints are scoped lexically, we want to walk nested
     /// items in the context of the outer item, so enable
diff --git a/src/librustc/middle/mem_categorization.rs b/src/librustc/middle/mem_categorization.rs
index c89d67d4aab..2c6bcc654a5 100644
--- a/src/librustc/middle/mem_categorization.rs
+++ b/src/librustc/middle/mem_categorization.rs
@@ -210,7 +210,7 @@ impl<'tcx> cmt_<'tcx> {
                 adt_def.variant_with_id(variant_did)
             }
             _ => {
-                assert_eq!(adt_def.variants.len(), 1);
+                assert!(adt_def.is_univariant());
                 &adt_def.variants[0]
             }
         };
@@ -1096,7 +1096,7 @@ impl<'a, 'gcx, 'tcx> MemCategorizationContext<'a, 'gcx, 'tcx> {
                                               -> cmt<'tcx> {
         // univariant enums do not need downcasts
         let base_did = self.tcx.parent_def_id(variant_did).unwrap();
-        if self.tcx.adt_def(base_did).variants.len() != 1 {
+        if !self.tcx.adt_def(base_did).is_univariant() {
             let base_ty = base_cmt.ty;
             let ret = Rc::new(cmt_ {
                 id: node.id(),
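Both hunks in this file swap a hand-rolled `variants.len() == 1` check for `AdtDef::is_univariant`. Judging from the asserts it replaces, the helper amounts to (a sketch, not the rustc source):

    impl AdtDef {
        /// True for structs, unions, and enums with exactly one variant.
        pub fn is_univariant(&self) -> bool {
            self.variants.len() == 1
        }
    }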
diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs
index 904f9a09125..22a3edd200c 100644
--- a/src/librustc/ty/context.rs
+++ b/src/librustc/ty/context.rs
@@ -41,7 +41,7 @@ use ty::{PolyFnSig, InferTy, ParamTy, ProjectionTy, ExistentialPredicate, Predic
 use ty::RegionKind;
 use ty::{TyVar, TyVid, IntVar, IntVid, FloatVar, FloatVid};
 use ty::TypeVariants::*;
-use ty::layout::{LayoutDetails, TargetDataLayout};
+use ty::layout::{Layout, TargetDataLayout};
 use ty::maps;
 use ty::steal::Steal;
 use ty::BindingMode;
@@ -78,7 +78,7 @@ use hir;
 /// Internal storage
 pub struct GlobalArenas<'tcx> {
     // internings
-    layout: TypedArena<LayoutDetails>,
+    layout: TypedArena<Layout>,
 
     // references
     generics: TypedArena<ty::Generics>,
@@ -918,7 +918,7 @@ pub struct GlobalCtxt<'tcx> {
 
     stability_interner: RefCell<FxHashSet<&'tcx attr::Stability>>,
 
-    layout_interner: RefCell<FxHashSet<&'tcx LayoutDetails>>,
+    layout_interner: RefCell<FxHashSet<&'tcx Layout>>,
 
     /// A vector of every trait accessible in the whole crate
     /// (i.e. including those from subcrates). This is used only for
@@ -1016,7 +1016,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
         interned
     }
 
-    pub fn intern_layout(self, layout: LayoutDetails) -> &'gcx LayoutDetails {
+    pub fn intern_layout(self, layout: Layout) -> &'gcx Layout {
         if let Some(layout) = self.layout_interner.borrow().get(&layout) {
             return layout;
         }
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 71bf333a8c6..491fa2a240c 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 pub use self::Integer::*;
+pub use self::Layout::*;
 pub use self::Primitive::*;
 
 use session::{self, DataTypeKind, Session};
@@ -20,10 +21,10 @@ use syntax_pos::DUMMY_SP;
 
 use std::cmp;
 use std::fmt;
-use std::i128;
+use std::i64;
 use std::iter;
 use std::mem;
-use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive};
+use std::ops::Deref;
 
 use ich::StableHashingContext;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
@@ -202,18 +203,6 @@ impl TargetDataLayout {
             bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits)
         }
     }
-
-    pub fn vector_align(&self, vec_size: Size) -> Align {
-        for &(size, align) in &self.vector_align {
-            if size == vec_size {
-                return align;
-            }
-        }
-        // Default to natural alignment, which is what LLVM does.
-        // That is, use the size, rounded up to a power of 2.
-        let align = vec_size.bytes().next_power_of_two();
-        Align::from_bytes(align, align).unwrap()
-    }
 }
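The natural-alignment fallback removed above rounds the vector size up to a power of two; a worked example (standalone sketch):

    // e.g. a 12-byte vector with no matching dl.vector_align entry:
    let align = 12u64.next_power_of_two();
    assert_eq!(align, 16); // natural alignment is 16 bytes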
 
 pub trait HasDataLayout: Copy {
@@ -226,6 +215,12 @@ impl<'a> HasDataLayout for &'a TargetDataLayout {
     }
 }
 
+impl<'a, 'tcx> HasDataLayout for TyCtxt<'a, 'tcx, 'tcx> {
+    fn data_layout(&self) -> &TargetDataLayout {
+        &self.data_layout
+    }
+}
+
 /// Endianness of the target, which must match cfg(target-endian).
 #[derive(Copy, Clone)]
 pub enum Endian {
@@ -241,8 +236,7 @@ pub struct Size {
 
 impl Size {
     pub fn from_bits(bits: u64) -> Size {
-        // Avoid potential overflow from `bits + 7`.
-        Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
+        Size::from_bytes((bits + 7) / 8)
     }
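The overflow that the deleted comment guards against is concrete: for bit counts near u64::MAX, the restored `(bits + 7) / 8` wraps, while the removed form divides before adding. A worked check (hypothetical values, standalone sketch):

    let bits = u64::MAX - 2;
    // The naive rounding overflows: bits + 7 does not fit in a u64.
    assert!(bits.checked_add(7).is_none());
    // The removed formulation splits the division and stays in range:
    let bytes = bits / 8 + ((bits % 8) + 7) / 8;
    assert_eq!(bytes, u64::MAX / 8 + 1);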
 
     pub fn from_bytes(bytes: u64) -> Size {
@@ -267,11 +261,6 @@ impl Size {
         Size::from_bytes((self.bytes() + mask) & !mask)
     }
 
-    pub fn is_abi_aligned(self, align: Align) -> bool {
-        let mask = align.abi() - 1;
-        self.bytes() & mask == 0
-    }
-
     pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
         let dl = cx.data_layout();
 
@@ -289,6 +278,8 @@ impl Size {
     pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
         let dl = cx.data_layout();
 
+        // Each Size is less than dl.obj_size_bound(); the multiplication
+        // below is checked and its result is validated against that bound.
         match self.bytes().checked_mul(count) {
             Some(bytes) if bytes < dl.obj_size_bound() => {
                 Some(Size::from_bytes(bytes))
@@ -298,46 +289,6 @@ impl Size {
     }
 }
 
-// Panicking addition, subtraction and multiplication for convenience.
-// Avoid during layout computation, return `LayoutError` instead.
-
-impl Add for Size {
-    type Output = Size;
-    fn add(self, other: Size) -> Size {
-        // Each Size is less than 1 << 61, so the sum is
-        // less than 1 << 62 (and therefore can't overflow).
-        Size::from_bytes(self.bytes() + other.bytes())
-    }
-}
-
-impl Sub for Size {
-    type Output = Size;
-    fn sub(self, other: Size) -> Size {
-        // Each Size is less than 1 << 61, so an underflow
-        // would result in a value larger than 1 << 61,
-        // which Size::from_bytes will catch for us.
-        Size::from_bytes(self.bytes() - other.bytes())
-    }
-}
-
-impl Mul<u64> for Size {
-    type Output = Size;
-    fn mul(self, count: u64) -> Size {
-        match self.bytes().checked_mul(count) {
-            Some(bytes) => Size::from_bytes(bytes),
-            None => {
-                bug!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count)
-            }
-        }
-    }
-}
-
-impl AddAssign for Size {
-    fn add_assign(&mut self, other: Size) {
-        *self = *self + other;
-    }
-}
-
 /// Alignment of a type in bytes, both ABI-mandated and preferred.
 /// Each field is a power of two, giving the alignment a maximum
 /// value of 2^(2^8 - 1), which is limited by LLVM to a i32, with
@@ -350,8 +301,7 @@ pub struct Align {
 
 impl Align {
     pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
-        Align::from_bytes(Size::from_bits(abi).bytes(),
-                          Size::from_bits(pref).bytes())
+        Align::from_bytes((abi + 7) / 8, (pref + 7) / 8)
     }
 
     pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
@@ -390,14 +340,6 @@ impl Align {
         1 << self.pref
     }
 
-    pub fn abi_bits(self) -> u64 {
-        self.abi() * 8
-    }
-
-    pub fn pref_bits(self) -> u64 {
-        self.pref() * 8
-    }
-
     pub fn min(self, other: Align) -> Align {
         Align {
             abi: cmp::min(self.abi, other.abi),
@@ -416,6 +358,7 @@ impl Align {
 /// Integers, also used for enum discriminants.
 #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
 pub enum Integer {
+    I1,
     I8,
     I16,
     I32,
@@ -423,9 +366,10 @@ pub enum Integer {
     I128,
 }
 
-impl<'a, 'tcx> Integer {
+impl Integer {
     pub fn size(&self) -> Size {
         match *self {
+            I1 => Size::from_bits(1),
             I8 => Size::from_bytes(1),
             I16 => Size::from_bytes(2),
             I32 => Size::from_bytes(4),
@@ -438,6 +382,7 @@ impl<'a, 'tcx> Integer {
         let dl = cx.data_layout();
 
         match *self {
+            I1 => dl.i1_align,
             I8 => dl.i8_align,
             I16 => dl.i16_align,
             I32 => dl.i32_align,
@@ -446,13 +391,16 @@ impl<'a, 'tcx> Integer {
         }
     }
 
-    pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> {
+    pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>,
+                           signed: bool) -> Ty<'tcx> {
         match (*self, signed) {
+            (I1, false) => tcx.types.u8,
             (I8, false) => tcx.types.u8,
             (I16, false) => tcx.types.u16,
             (I32, false) => tcx.types.u32,
             (I64, false) => tcx.types.u64,
             (I128, false) => tcx.types.u128,
+            (I1, true) => tcx.types.i8,
             (I8, true) => tcx.types.i8,
             (I16, true) => tcx.types.i16,
             (I32, true) => tcx.types.i32,
@@ -462,8 +410,9 @@ impl<'a, 'tcx> Integer {
     }
 
     /// Find the smallest Integer type which can represent the signed value.
-    pub fn fit_signed(x: i128) -> Integer {
+    pub fn fit_signed(x: i64) -> Integer {
         match x {
+            -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
             -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
             -0x0000_0000_0000_8000...0x0000_0000_0000_7fff => I16,
             -0x0000_0000_8000_0000...0x0000_0000_7fff_ffff => I32,
@@ -473,8 +422,9 @@ impl<'a, 'tcx> Integer {
     }
 
     /// Find the smallest Integer type which can represent the unsigned value.
-    pub fn fit_unsigned(x: u128) -> Integer {
+    pub fn fit_unsigned(x: u64) -> Integer {
         match x {
+            0...0x0000_0000_0000_0001 => I1,
             0...0x0000_0000_0000_00ff => I8,
             0...0x0000_0000_0000_ffff => I16,
             0...0x0000_0000_ffff_ffff => I32,
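Worked examples for the two `fit_*` helpers, following the restored match arms above (sketch; assumes the surrounding module's `Integer` variants are in scope):

    assert_eq!(Integer::fit_unsigned(1), I1);   // 0...1 fits the 1-bit type
    assert_eq!(Integer::fit_unsigned(255), I8);
    assert_eq!(Integer::fit_unsigned(256), I16);
    assert_eq!(Integer::fit_signed(-1), I1);    // -1...0 is the I1 range
    assert_eq!(Integer::fit_signed(128), I16);  // one past i8::MAX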
@@ -488,8 +438,8 @@ impl<'a, 'tcx> Integer {
         let dl = cx.data_layout();
 
         let wanted = align.abi();
-        for &candidate in &[I8, I16, I32, I64, I128] {
-            let ty = Int(candidate, false);
+        for &candidate in &[I8, I16, I32, I64] {
+            let ty = Int(candidate);
             if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
                 return Some(candidate);
             }
@@ -515,19 +465,19 @@ impl<'a, 'tcx> Integer {
 
     /// Find the appropriate Integer type and signedness for the given
     /// signed discriminant range and #[repr] attribute.
-    /// N.B.: u128 values above i128::MAX will be treated as signed, but
+    /// N.B.: u64 values above i64::MAX will be treated as signed, but
     /// that shouldn't affect anything, other than maybe debuginfo.
-    fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                  ty: Ty<'tcx>,
-                  repr: &ReprOptions,
-                  min: i128,
-                  max: i128)
-                  -> (Integer, bool) {
+    fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                            ty: Ty<'tcx>,
+                            repr: &ReprOptions,
+                            min: i64,
+                            max: i64)
+                            -> (Integer, bool) {
         // Theoretically, negative values could be larger in unsigned representation
         // than the unsigned representation of the signed minimum. However, if there
-        // are any negative values, the only valid unsigned representation is u128
-        // which can fit all i128 values, so the result remains unaffected.
-        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
+        // are any negative values, the only valid unsigned representation is u64
+        // which can fit all i64 values, so the result remains unaffected.
+        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
 
         let mut min_from_extern = None;
@@ -568,27 +518,22 @@ impl<'a, 'tcx> Integer {
 /// Fundamental unit of memory access and layout.
 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
 pub enum Primitive {
-    /// The `bool` is the signedness of the `Integer` type.
-    ///
-    /// One would think we would not care about such details this low down,
-    /// but some ABIs are described in terms of C types and ISAs where the
-    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
-    /// a negative integer passed by zero-extension will appear positive in
-    /// the callee, and most operations on it will produce the wrong values.
-    Int(Integer, bool),
+    Int(Integer),
     F32,
     F64,
     Pointer
 }
 
-impl<'a, 'tcx> Primitive {
+impl Primitive {
     pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
         let dl = cx.data_layout();
 
         match self {
-            Int(i, _) => i.size(),
-            F32 => Size::from_bits(32),
-            F64 => Size::from_bits(64),
+            Int(I1) | Int(I8) => Size::from_bits(8),
+            Int(I16) => Size::from_bits(16),
+            Int(I32) | F32 => Size::from_bits(32),
+            Int(I64) | F64 => Size::from_bits(64),
+            Int(I128) => Size::from_bits(128),
             Pointer => dl.pointer_size
         }
     }
@@ -597,627 +542,651 @@ impl<'a, 'tcx> Primitive {
         let dl = cx.data_layout();
 
         match self {
-            Int(i, _) => i.align(dl),
+            Int(I1) => dl.i1_align,
+            Int(I8) => dl.i8_align,
+            Int(I16) => dl.i16_align,
+            Int(I32) => dl.i32_align,
+            Int(I64) => dl.i64_align,
+            Int(I128) => dl.i128_align,
             F32 => dl.f32_align,
             F64 => dl.f64_align,
             Pointer => dl.pointer_align
         }
     }
+}
 
-    pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Ty<'tcx> {
-        match *self {
-            Int(i, signed) => i.to_ty(tcx, signed),
-            F32 => tcx.types.f32,
-            F64 => tcx.types.f64,
-            Pointer => tcx.mk_mut_ptr(tcx.mk_nil()),
-        }
-    }
+/// Path through fields of nested structures.
+// FIXME(eddyb) use small vector optimization for the common case.
+pub type FieldPath = Vec<u32>;
+
+/// A structure, a product type in ADT terms.
+#[derive(PartialEq, Eq, Hash, Debug)]
+pub struct Struct {
+    /// Maximum alignment of fields and repr alignment.
+    pub align: Align,
+
+    /// Primitive alignment of fields without repr alignment.
+    pub primitive_align: Align,
+
+    /// If true, no alignment padding is used.
+    pub packed: bool,
+
+    /// If true, the size is exact, otherwise it's only a lower bound.
+    pub sized: bool,
+
+    /// Offsets for the first byte of each field, ordered to match the source definition order.
+    /// This vector is not necessarily sorted by increasing offset.
+    /// FIXME(eddyb) use small vector optimization for the common case.
+    pub offsets: Vec<Size>,
+
+    /// Maps source order field indices to memory order indices, depending on how fields were permuted.
+    /// FIXME(camlorn) also consider small vector optimization here.
+    pub memory_index: Vec<u32>,
+
+    pub min_size: Size,
 }
 
-/// Information about one scalar component of a Rust type.
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub struct Scalar {
-    pub value: Primitive,
-
-    /// Inclusive wrap-around range of valid values, that is, if
-    /// min > max, it represents min..=u128::MAX followed by 0..=max.
-    // FIXME(eddyb) always use the shortest range, e.g. by finding
-    // the largest space between two consecutive valid values and
-    // taking everything else as the (shortest) valid range.
-    pub valid_range: RangeInclusive<u128>,
+/// Info required to optimize struct layout.
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, Debug)]
+enum StructKind {
+    /// A tuple, closure, or univariant which cannot be coerced to unsized.
+    AlwaysSizedUnivariant,
+    /// A univariant, the last field of which may be coerced to unsized.
+    MaybeUnsizedUnivariant,
+    /// A univariant, but part of an enum.
+    EnumVariant,
 }
 
-impl Scalar {
-    pub fn is_bool(&self) -> bool {
-        if let Int(I8, _) = self.value {
-            self.valid_range == (0..=1)
+impl<'a, 'tcx> Struct {
+    fn new(dl: &TargetDataLayout,
+           fields: &Vec<&'a Layout>,
+           repr: &ReprOptions,
+           kind: StructKind,
+           scapegoat: Ty<'tcx>)
+           -> Result<Struct, LayoutError<'tcx>> {
+        if repr.packed() && repr.align > 0 {
+            bug!("Struct cannot be packed and aligned");
+        }
+
+        let align = if repr.packed() {
+            dl.i8_align
         } else {
-            false
+            dl.aggregate_align
+        };
+
+        let mut ret = Struct {
+            align,
+            primitive_align: align,
+            packed: repr.packed(),
+            sized: true,
+            offsets: vec![],
+            memory_index: vec![],
+            min_size: Size::from_bytes(0),
+        };
+
+        // Anything with repr(C) or repr(packed) doesn't optimize.
+        // Neither do 1-member and 2-member structs.
+        // In addition, code in trans assumes that 2-element structs can become pairs.
+        // It's easier to just short-circuit here.
+        let can_optimize = (fields.len() > 2 || StructKind::EnumVariant == kind)
+            && (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty();
+
+        let (optimize, sort_ascending) = match kind {
+            StructKind::AlwaysSizedUnivariant => (can_optimize, false),
+            StructKind::MaybeUnsizedUnivariant => (can_optimize, false),
+            StructKind::EnumVariant => {
+                assert!(fields.len() >= 1, "Enum variants must have discriminants.");
+                (can_optimize && fields[0].size(dl).bytes() == 1, true)
+            }
+        };
+
+        ret.offsets = vec![Size::from_bytes(0); fields.len()];
+        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+
+        if optimize {
+            let start = if let StructKind::EnumVariant = kind { 1 } else { 0 };
+            let end = if let StructKind::MaybeUnsizedUnivariant = kind {
+                fields.len() - 1
+            } else {
+                fields.len()
+            };
+            if end > start {
+                let optimizing = &mut inverse_memory_index[start..end];
+                if sort_ascending {
+                    optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
+                } else {
+                    optimizing.sort_by(|&a, &b| {
+                        let a = fields[a as usize].align(dl).abi();
+                        let b = fields[b as usize].align(dl).abi();
+                        b.cmp(&a)
+                    });
+                }
+            }
         }
-    }
-}
 
-/// The first half of a fat pointer.
-/// - For a trait object, this is the address of the box.
-/// - For a slice, this is the base address.
-pub const FAT_PTR_ADDR: usize = 0;
+        // inverse_memory_index holds field indices by increasing memory offset.
+        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+        // We now write field offsets to the corresponding offset slot;
+        // field 5 with offset 0 puts 0 in offsets[5].
+        // At the bottom of this function, we use inverse_memory_index to produce memory_index.
 
-/// The second half of a fat pointer.
-/// - For a trait object, this is the address of the vtable.
-/// - For a slice, this is the length.
-pub const FAT_PTR_EXTRA: usize = 1;
+        if let StructKind::EnumVariant = kind {
+            assert_eq!(inverse_memory_index[0], 0,
+              "Enum variant discriminants must have the lowest offset.");
+        }
 
-/// Describes how the fields of a type are located in memory.
-#[derive(PartialEq, Eq, Hash, Debug)]
-pub enum FieldPlacement {
-    /// All fields start at no offset. The `usize` is the field count.
-    Union(usize),
+        let mut offset = Size::from_bytes(0);
 
-    /// Array/vector-like placement, with all fields of identical types.
-    Array {
-        stride: Size,
-        count: u64
-    },
+        for i in inverse_memory_index.iter() {
+            let field = fields[*i as usize];
+            if !ret.sized {
+                bug!("Struct::new: field #{} of `{}` comes after unsized field",
+                     ret.offsets.len(), scapegoat);
+            }
 
-    /// Struct-like placement, with precomputed offsets.
-    ///
-    /// Fields are guaranteed to not overlap, but note that gaps
-    /// before, between and after all the fields are NOT always
-    /// padding, and as such their contents may not be discarded.
-    /// For example, enum variants leave a gap at the start,
-    /// where the discriminant field in the enum layout goes.
-    Arbitrary {
-        /// Offsets for the first byte of each field,
-        /// ordered to match the source definition order.
-        /// This vector does not go in increasing order.
-        // FIXME(eddyb) use small vector optimization for the common case.
-        offsets: Vec<Size>,
-
-        /// Maps source order field indices to memory order indices,
-        /// depending how fields were permuted.
-        // FIXME(camlorn) also consider small vector  optimization here.
-        memory_index: Vec<u32>
-    }
-}
+            if field.is_unsized() {
+                ret.sized = false;
+            }
 
-impl FieldPlacement {
-    pub fn count(&self) -> usize {
-        match *self {
-            FieldPlacement::Union(count) => count,
-            FieldPlacement::Array { count, .. } => {
-                let usize_count = count as usize;
-                assert_eq!(usize_count as u64, count);
-                usize_count
+            // Invariant: offset < dl.obj_size_bound() <= 1<<61
+            if !ret.packed {
+                let align = field.align(dl);
+                let primitive_align = field.primitive_align(dl);
+                ret.align = ret.align.max(align);
+                ret.primitive_align = ret.primitive_align.max(primitive_align);
+                offset = offset.abi_align(align);
             }
-            FieldPlacement::Arbitrary { ref offsets, .. } => offsets.len()
+
+            debug!("Struct::new offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
+            ret.offsets[*i as usize] = offset;
+
+            offset = offset.checked_add(field.size(dl), dl)
+                           .map_or(Err(LayoutError::SizeOverflow(scapegoat)), Ok)?;
         }
-    }
 
-    pub fn offset(&self, i: usize) -> Size {
-        match *self {
-            FieldPlacement::Union(_) => Size::from_bytes(0),
-            FieldPlacement::Array { stride, count } => {
-                let i = i as u64;
-                assert!(i < count);
-                stride * i
+        if repr.align > 0 {
+            let repr_align = repr.align as u64;
+            ret.align = ret.align.max(Align::from_bytes(repr_align, repr_align).unwrap());
+            debug!("Struct::new repr_align: {:?}", repr_align);
+        }
+
+        debug!("Struct::new min_size: {:?}", offset);
+        ret.min_size = offset;
+
+        // As stated above, inverse_memory_index holds field indices by increasing offset.
+        // This makes it an already-sorted view of the offsets vec.
+        // To invert it, consider:
+        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+        // Field 5 would be the first element, so memory_index is i:
+        // Note: if we didn't optimize, it's already right.
+
+        if optimize {
+            ret.memory_index = vec![0; inverse_memory_index.len()];
+
+            for i in 0..inverse_memory_index.len() {
+                ret.memory_index[inverse_memory_index[i] as usize] = i as u32;
             }
-            FieldPlacement::Arbitrary { ref offsets, .. } => offsets[i]
+        } else {
+            ret.memory_index = inverse_memory_index;
         }
+
+        Ok(ret)
     }
 
-    pub fn memory_index(&self, i: usize) -> usize {
-        match *self {
-            FieldPlacement::Union(_) |
-            FieldPlacement::Array { .. } => i,
-            FieldPlacement::Arbitrary { ref memory_index, .. } => {
-                let r = memory_index[i];
-                assert_eq!(r as usize as u32, r);
-                r as usize
+    /// Get the size with trailing alignment padding.
+    pub fn stride(&self) -> Size {
+        self.min_size.abi_align(self.align)
+    }
+
+    /// Determine whether a structure would be zero-sized, given its fields.
+    fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
+                              -> Result<bool, LayoutError<'tcx>>
+    where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
+        for field in fields {
+            let field = field?;
+            if field.is_unsized() || field.size(dl).bytes() > 0 {
+                return Ok(false);
             }
         }
+        Ok(true)
     }
 
-    /// Get source indices of the fields by increasing offsets.
+    /// Get the indices of the types that make up this struct, by increasing offset.
     #[inline]
-    pub fn index_by_increasing_offset<'a>(&'a self) -> impl iter::Iterator<Item=usize>+'a {
+    pub fn field_index_by_increasing_offset<'b>(&'b self) -> impl iter::Iterator<Item=usize>+'b {
         let mut inverse_small = [0u8; 64];
         let mut inverse_big = vec![];
-        let use_small = self.count() <= inverse_small.len();
+        let use_small = self.memory_index.len() <= inverse_small.len();
 
         // We have to write this logic twice in order to keep the array small.
-        if let FieldPlacement::Arbitrary { ref memory_index, .. } = *self {
-            if use_small {
-                for i in 0..self.count() {
-                    inverse_small[memory_index[i] as usize] = i as u8;
+        if use_small {
+            for i in 0..self.memory_index.len() {
+                inverse_small[self.memory_index[i] as usize] = i as u8;
+            }
+        } else {
+            inverse_big = vec![0; self.memory_index.len()];
+            for i in 0..self.memory_index.len() {
+                inverse_big[self.memory_index[i] as usize] = i as u32;
+            }
+        }
+
+        (0..self.memory_index.len()).map(move |i| {
+            if use_small { inverse_small[i] as usize }
+            else { inverse_big[i] as usize }
+        })
+    }
+
+    /// Find the path leading to a non-zero leaf field, starting from
+    /// the given type and recursing through aggregates.
+    /// The tuple is `(path, source_path)`,
+    /// where `path` is in memory order and `source_path` in source order.
+    // FIXME(eddyb) track value ranges and traverse already optimized enums.
+    fn non_zero_field_in_type(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                              param_env: ty::ParamEnv<'tcx>,
+                              ty: Ty<'tcx>)
+                              -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>> {
+        match (ty.layout(tcx, param_env)?, &ty.sty) {
+            (&Scalar { non_zero: true, .. }, _) |
+            (&CEnum { non_zero: true, .. }, _) => Ok(Some((vec![], vec![]))),
+            (&FatPointer { non_zero: true, .. }, _) => {
+                Ok(Some((vec![FAT_PTR_ADDR as u32], vec![FAT_PTR_ADDR as u32])))
+            }
+
+            // Is this the NonZero lang item wrapping a pointer or integer type?
+            (&Univariant { non_zero: true, .. }, &ty::TyAdt(def, substs)) => {
+                let fields = &def.struct_variant().fields;
+                assert_eq!(fields.len(), 1);
+                match *fields[0].ty(tcx, substs).layout(tcx, param_env)? {
+                    // FIXME(eddyb) also allow floating-point types here.
+                    Scalar { value: Int(_), non_zero: false } |
+                    Scalar { value: Pointer, non_zero: false } => {
+                        Ok(Some((vec![0], vec![0])))
+                    }
+                    FatPointer { non_zero: false, .. } => {
+                        let tmp = vec![FAT_PTR_ADDR as u32, 0];
+                        Ok(Some((tmp.clone(), tmp)))
+                    }
+                    _ => Ok(None)
                 }
-            } else {
-                inverse_big = vec![0; self.count()];
-                for i in 0..self.count() {
-                    inverse_big[memory_index[i] as usize] = i as u32;
+            }
+
+            // Perhaps one of the fields of this struct is non-zero
+            // let's recurse and find out
+            (&Univariant { ref variant, .. }, &ty::TyAdt(def, substs)) if def.is_struct() => {
+                Struct::non_zero_field_paths(
+                    tcx,
+                    param_env,
+                    def.struct_variant().fields.iter().map(|field| {
+                        field.ty(tcx, substs)
+                    }),
+                    Some(&variant.memory_index[..]))
+            }
+
+            // Perhaps one of the upvars of this closure is non-zero
+            (&Univariant { ref variant, .. }, &ty::TyClosure(def, substs)) => {
+                let upvar_tys = substs.upvar_tys(def, tcx);
+                Struct::non_zero_field_paths(
+                    tcx,
+                    param_env,
+                    upvar_tys,
+                    Some(&variant.memory_index[..]))
+            }
+            // Can we use one of the fields in this tuple?
+            (&Univariant { ref variant, .. }, &ty::TyTuple(tys, _)) => {
+                Struct::non_zero_field_paths(
+                    tcx,
+                    param_env,
+                    tys.iter().cloned(),
+                    Some(&variant.memory_index[..]))
+            }
+
+            // Is this a fixed-size array of something non-zero
+            // with at least one element?
+            (_, &ty::TyArray(ety, mut count)) => {
+                if count.has_projections() {
+                    count = tcx.normalize_associated_type_in_env(&count, param_env);
+                    if count.has_projections() {
+                        return Err(LayoutError::Unknown(ty));
+                    }
+                }
+                if count.val.to_const_int().unwrap().to_u64().unwrap() != 0 {
+                    Struct::non_zero_field_paths(
+                        tcx,
+                        param_env,
+                        Some(ety).into_iter(),
+                        None)
+                } else {
+                    Ok(None)
                 }
             }
-        }
 
-        (0..self.count()).map(move |i| {
-            match *self {
-                FieldPlacement::Union(_) |
-                FieldPlacement::Array { .. } => i,
-                FieldPlacement::Arbitrary { .. } => {
-                    if use_small { inverse_small[i] as usize }
-                    else { inverse_big[i] as usize }
+            (_, &ty::TyProjection(_)) | (_, &ty::TyAnon(..)) => {
+                let normalized = tcx.normalize_associated_type_in_env(&ty, param_env);
+                if ty == normalized {
+                    return Ok(None);
                 }
+                return Struct::non_zero_field_in_type(tcx, param_env, normalized);
             }
-        })
-    }
-}
 
-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
-#[derive(Clone, PartialEq, Eq, Hash, Debug)]
-pub enum Abi {
-    Uninhabited,
-    Scalar(Scalar),
-    ScalarPair(Scalar, Scalar),
-    Vector,
-    Aggregate {
-        /// If true, the size is exact, otherwise it's only a lower bound.
-        sized: bool,
-        packed: bool
+            // Anything else is not a non-zero type.
+            _ => Ok(None)
+        }
     }
-}
 
-impl Abi {
-    /// Returns true if the layout corresponds to an unsized type.
-    pub fn is_unsized(&self) -> bool {
-        match *self {
-            Abi::Uninhabited |
-            Abi::Scalar(_) |
-            Abi::ScalarPair(..) |
-            Abi::Vector => false,
-            Abi::Aggregate { sized, .. } => !sized
+    /// Find the path leading to a non-zero leaf field, starting from
+    /// the given set of fields and recursing through aggregates.
+    /// Returns Some((path, source_path)) on success.
+    /// `path` is translated to memory order. `source_path` is not.
+    fn non_zero_field_paths<I>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                               param_env: ty::ParamEnv<'tcx>,
+                               fields: I,
+                               permutation: Option<&[u32]>)
+                               -> Result<Option<(FieldPath, FieldPath)>, LayoutError<'tcx>>
+    where I: Iterator<Item=Ty<'tcx>> {
+        for (i, ty) in fields.enumerate() {
+            let r = Struct::non_zero_field_in_type(tcx, param_env, ty)?;
+            if let Some((mut path, mut source_path)) = r {
+                source_path.push(i as u32);
+                let index = if let Some(p) = permutation {
+                    p[i] as usize
+                } else {
+                    i
+                };
+                path.push(index as u32);
+                return Ok(Some((path, source_path)));
+            }
         }
+        Ok(None)
     }
 
-    /// Returns true if the fields of the layout are packed.
-    pub fn is_packed(&self) -> bool {
-        match *self {
-            Abi::Uninhabited |
-            Abi::Scalar(_) |
-            Abi::ScalarPair(..) |
-            Abi::Vector => false,
-            Abi::Aggregate { packed, .. } => packed
+    pub fn over_align(&self) -> Option<u32> {
+        let align = self.align.abi();
+        let primitive_align = self.primitive_align.abi();
+        if align > primitive_align {
+            Some(align as u32)
+        } else {
+            None
         }
     }
 }
 
+/// An untagged union.
 #[derive(PartialEq, Eq, Hash, Debug)]
-pub enum Variants {
-    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
-    Single {
-        index: usize
-    },
-
-    /// General-case enums: for each case there is a struct, and they all have
-    /// all space reserved for the discriminant, and their first field starts
-    /// at a non-0 offset, after where the discriminant would go.
-    Tagged {
-        discr: Scalar,
-        variants: Vec<LayoutDetails>,
-    },
+pub struct Union {
+    pub align: Align,
+    pub primitive_align: Align,
 
-    /// Multiple cases distinguished by a niche (values invalid for a type):
-    /// the variant `dataful_variant` contains a niche at an arbitrary
-    /// offset (field 0 of the enum), which for a variant with discriminant
-    /// `d` is set to `(d - niche_variants.start).wrapping_add(niche_start)`.
-    ///
-    /// For example, `Option<(usize, &T)>`  is represented such that
-    /// `None` has a null pointer for the second tuple field, and
-    /// `Some` is the identity function (with a non-null reference).
-    NicheFilling {
-        dataful_variant: usize,
-        niche_variants: RangeInclusive<usize>,
-        niche: Scalar,
-        niche_start: u128,
-        variants: Vec<LayoutDetails>,
-    }
-}
+    pub min_size: Size,
 
-#[derive(Copy, Clone, Debug)]
-pub enum LayoutError<'tcx> {
-    Unknown(Ty<'tcx>),
-    SizeOverflow(Ty<'tcx>)
+    /// If true, no alignment padding is used.
+    pub packed: bool,
 }
 
-impl<'tcx> fmt::Display for LayoutError<'tcx> {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            LayoutError::Unknown(ty) => {
-                write!(f, "the type `{:?}` has an unknown layout", ty)
-            }
-            LayoutError::SizeOverflow(ty) => {
-                write!(f, "the type `{:?}` is too big for the current architecture", ty)
-            }
+impl<'a, 'tcx> Union {
+    fn new(dl: &TargetDataLayout, repr: &ReprOptions) -> Union {
+        if repr.packed() && repr.align > 0 {
+            bug!("Union cannot be packed and aligned");
         }
-    }
-}
 
-#[derive(PartialEq, Eq, Hash, Debug)]
-pub struct LayoutDetails {
-    pub variants: Variants,
-    pub fields: FieldPlacement,
-    pub abi: Abi,
-    pub align: Align,
-    pub size: Size
-}
+        let primitive_align = if repr.packed() {
+            dl.i8_align
+        } else {
+            dl.aggregate_align
+        };
 
-impl LayoutDetails {
-    fn scalar<C: HasDataLayout>(cx: C, scalar: Scalar) -> Self {
-        let size = scalar.value.size(cx);
-        let align = scalar.value.align(cx);
-        LayoutDetails {
-            variants: Variants::Single { index: 0 },
-            fields: FieldPlacement::Union(0),
-            abi: Abi::Scalar(scalar),
-            size,
+        let align = if repr.align > 0 {
+            let repr_align = repr.align as u64;
+            debug!("Union::new repr_align: {:?}", repr_align);
+            primitive_align.max(Align::from_bytes(repr_align, repr_align).unwrap())
+        } else {
+            primitive_align
+        };
+
+        Union {
             align,
+            primitive_align,
+            min_size: Size::from_bytes(0),
+            packed: repr.packed(),
         }
     }
 
-    fn uninhabited(field_count: usize) -> Self {
-        let align = Align::from_bytes(1, 1).unwrap();
-        LayoutDetails {
-            variants: Variants::Single { index: 0 },
-            fields: FieldPlacement::Union(field_count),
-            abi: Abi::Uninhabited,
-            align,
-            size: Size::from_bytes(0)
+    /// Extend the Union with more fields.
+    fn extend<I>(&mut self, dl: &TargetDataLayout,
+                 fields: I,
+                 scapegoat: Ty<'tcx>)
+                 -> Result<(), LayoutError<'tcx>>
+    where I: Iterator<Item=Result<&'a Layout, LayoutError<'tcx>>> {
+        for (index, field) in fields.enumerate() {
+            let field = field?;
+            if field.is_unsized() {
+                bug!("Union::extend: field #{} of `{}` is unsized",
+                     index, scapegoat);
+            }
+
+            debug!("Union::extend field: {:?} {:?}", field, field.size(dl));
+
+            if !self.packed {
+                self.align = self.align.max(field.align(dl));
+                self.primitive_align = self.primitive_align.max(field.primitive_align(dl));
+            }
+            self.min_size = cmp::max(self.min_size, field.size(dl));
         }
-    }
-}
 
-fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
-                        -> Result<&'tcx LayoutDetails, LayoutError<'tcx>>
-{
-    let (param_env, ty) = query.into_parts();
+        debug!("Union::extend min-size: {:?}", self.min_size);
 
-    let rec_limit = tcx.sess.recursion_limit.get();
-    let depth = tcx.layout_depth.get();
-    if depth > rec_limit {
-        tcx.sess.fatal(
-            &format!("overflow representing the type `{}`", ty));
+        Ok(())
     }
 
-    tcx.layout_depth.set(depth+1);
-    let layout = LayoutDetails::compute_uncached(tcx, param_env, ty);
-    tcx.layout_depth.set(depth);
+    /// Get the size with trailing alignment padding.
+    pub fn stride(&self) -> Size {
+        self.min_size.abi_align(self.align)
+    }
 
-    layout
+    pub fn over_align(&self) -> Option<u32> {
+        let align = self.align.abi();
+        let primitive_align = self.primitive_align.abi();
+        if align > primitive_align {
+            Some(align as u32)
+        } else {
+            None
+        }
+    }
 }
 
-pub fn provide(providers: &mut ty::maps::Providers) {
-    *providers = ty::maps::Providers {
-        layout_raw,
-        ..*providers
-    };
-}
+/// The first half of a fat pointer.
+/// - For a trait object, this is the address of the box.
+/// - For a slice, this is the base address.
+pub const FAT_PTR_ADDR: usize = 0;
 
-impl<'a, 'tcx> LayoutDetails {
-    fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                        param_env: ty::ParamEnv<'tcx>,
-                        ty: Ty<'tcx>)
-                        -> Result<&'tcx Self, LayoutError<'tcx>> {
-        let cx = (tcx, param_env);
-        let dl = cx.data_layout();
-        let scalar_unit = |value: Primitive| {
-            let bits = value.size(dl).bits();
-            assert!(bits <= 128);
-            Scalar {
-                value,
-                valid_range: 0..=(!0 >> (128 - bits))
-            }
-        };
-        let scalar = |value: Primitive| {
-            tcx.intern_layout(LayoutDetails::scalar(cx, scalar_unit(value)))
-        };
-        let scalar_pair = |a: Scalar, b: Scalar| {
-            let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
-            let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
-            let size = (b_offset + b.value.size(dl)).abi_align(align);
-            LayoutDetails {
-                variants: Variants::Single { index: 0 },
-                fields: FieldPlacement::Arbitrary {
-                    offsets: vec![Size::from_bytes(0), b_offset],
-                    memory_index: vec![0, 1]
-                },
-                abi: Abi::ScalarPair(a, b),
-                align,
-                size
-            }
-        };
+/// The second half of a fat pointer.
+/// - For a trait object, this is the address of the vtable.
+/// - For a slice, this is the length.
+pub const FAT_PTR_EXTRA: usize = 1;
 
-        #[derive(Copy, Clone, Debug)]
-        enum StructKind {
-            /// A tuple, closure, or univariant which cannot be coerced to unsized.
-            AlwaysSized,
-            /// A univariant, the last field of which may be coerced to unsized.
-            MaybeUnsized,
-            /// A univariant, but part of an enum.
-            EnumVariant(Integer),
-        }
-        let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
-            let packed = repr.packed();
-            if packed && repr.align > 0 {
-                bug!("struct cannot be packed and aligned");
-            }
+/// Type layout, from which size and alignment can be cheaply computed.
+/// For ADTs, it also includes field placement and enum optimizations.
+/// NOTE: Because Layout is interned, redundant information should be
+/// kept to a minimum, e.g. it includes no sub-component Ty or Layout.
+#[derive(Debug, PartialEq, Eq, Hash)]
+pub enum Layout {
+    /// TyBool, TyChar, TyInt, TyUint, TyFloat, TyRawPtr, TyRef or TyFnPtr.
+    Scalar {
+        value: Primitive,
+        // If true, the value cannot represent a bit pattern of all zeroes.
+        non_zero: bool
+    },
 
-            let mut align = if packed {
-                dl.i8_align
-            } else {
-                dl.aggregate_align
-            };
+    /// SIMD vectors, from structs marked with #[repr(simd)].
+    Vector {
+        element: Primitive,
+        count: u64
+    },
 
-            let mut sized = true;
-            let mut offsets = vec![Size::from_bytes(0); fields.len()];
-            let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+    /// TyArray, TySlice or TyStr.
+    Array {
+        /// If true, the size is exact, otherwise it's only a lower bound.
+        sized: bool,
+        align: Align,
+        primitive_align: Align,
+        element_size: Size,
+        count: u64
+    },
 
-            // Anything with repr(C) or repr(packed) doesn't optimize.
-            let optimize = match kind {
-                StructKind::AlwaysSized |
-                StructKind::MaybeUnsized |
-                StructKind::EnumVariant(I8) => {
-                    (repr.flags & ReprFlags::IS_UNOPTIMISABLE).is_empty()
-                }
-                StructKind::EnumVariant(_) => false
-            };
-            if optimize {
-                let end = if let StructKind::MaybeUnsized = kind {
-                    fields.len() - 1
-                } else {
-                    fields.len()
-                };
-                let optimizing = &mut inverse_memory_index[..end];
-                match kind {
-                    StructKind::AlwaysSized |
-                    StructKind::MaybeUnsized => {
-                        optimizing.sort_by_key(|&x| {
-                            // Place ZSTs first to avoid "interesting offsets",
-                            // especially with only one or two non-ZST fields.
-                            let f = &fields[x as usize];
-                            (!f.is_zst(), cmp::Reverse(f.align.abi()))
-                        })
-                    }
-                    StructKind::EnumVariant(_) => {
-                        optimizing.sort_by_key(|&x| fields[x as usize].align.abi());
-                    }
-                }
-            }
+    /// TyRawPtr or TyRef with a !Sized pointee.
+    FatPointer {
+        metadata: Primitive,
+        /// If true, the pointer cannot be null.
+        non_zero: bool
+    },
 
-            // inverse_memory_index holds field indices by increasing memory offset.
-            // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-            // We now write field offsets to the corresponding offset slot;
-            // field 5 with offset 0 puts 0 in offsets[5].
-            // At the bottom of this function, we use inverse_memory_index to produce memory_index.
+    // Remaining variants are all ADTs such as structs, enums or tuples.
 
-            let mut offset = Size::from_bytes(0);
+    /// C-like enums; basically an integer.
+    CEnum {
+        discr: Integer,
+        signed: bool,
+        non_zero: bool,
+        /// Inclusive discriminant range.
+        /// If min > max, it represents min...u64::MAX followed by 0...max.
+        // FIXME(eddyb) always use the shortest range, e.g. by finding
+        // the largest space between two consecutive discriminants and
+        // taking everything else as the (shortest) discriminant range.
+        min: u64,
+        max: u64
+    },
 
-            if let StructKind::EnumVariant(discr) = kind {
-                offset = discr.size();
-                if !packed {
-                    let discr_align = discr.align(dl);
-                    align = align.max(discr_align);
-                }
-            }
+    /// Single-case enums, and structs/tuples.
+    Univariant {
+        variant: Struct,
+        /// If true, the structure is NonZero.
+        // FIXME(eddyb) use a newtype Layout kind for this.
+        non_zero: bool
+    },
 
-            for &i in &inverse_memory_index {
-                let field = fields[i as usize];
-                if !sized {
-                    bug!("univariant: field #{} of `{}` comes after unsized field",
-                        offsets.len(), ty);
-                }
+    /// Untagged unions.
+    UntaggedUnion {
+        variants: Union,
+    },
 
-                if field.abi == Abi::Uninhabited {
-                    return Ok(LayoutDetails::uninhabited(fields.len()));
-                }
+    /// General-case enums: for each case there is a struct, and they
+    /// all start with a field for the discriminant.
+    General {
+        discr: Integer,
+        variants: Vec<Struct>,
+        size: Size,
+        align: Align,
+        primitive_align: Align,
+    },
 
-                if field.is_unsized() {
-                    sized = false;
-                }
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` must have a single field which is known to be nonnull due to its type.
+    /// The other case is known to be zero sized. Hence we represent the enum
+    /// as simply a nullable pointer: if not null it indicates the `nndiscr` variant,
+    /// otherwise it indicates the other case.
+    ///
+    /// For example, `std::option::Option` instantiated at a safe pointer type
+    /// is represented such that `None` is a null pointer and `Some` is the
+    /// identity function.
+    RawNullablePointer {
+        nndiscr: u64,
+        value: Primitive
+    },
 
-                // Invariant: offset < dl.obj_size_bound() <= 1<<61
-                if !packed {
-                    offset = offset.abi_align(field.align);
-                    align = align.max(field.align);
-                }
+    /// Two cases distinguished by a nullable pointer: the case with discriminant
+    /// `nndiscr` is represented by the struct `nonnull`, where the `discrfield`th
+    /// field is known to be nonnull due to its type; if that field is null, then
+    /// it represents the other case, which is known to be zero-sized.
+    StructWrappedNullablePointer {
+        nndiscr: u64,
+        nonnull: Struct,
+        /// N.B. There is a 0 at the start, for LLVM GEP through a pointer.
+        discrfield: FieldPath,
+        /// Like discrfield, but in source order. For debuginfo.
+        discrfield_source: FieldPath
+    }
+}
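Both nullable-pointer variants above elide the discriminant entirely, which is observable from stable Rust; a minimal check:

    use std::mem::size_of;

    fn main() {
        // RawNullablePointer-shaped: `None` becomes the null pointer itself.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        // StructWrappedNullablePointer-shaped: the niche is the non-null
        // heap pointer buried inside `String`.
        assert_eq!(size_of::<Option<String>>(), size_of::<String>());
    }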
 
-                debug!("univariant offset: {:?} field: {:#?}", offset, field);
-                offsets[i as usize] = offset;
+#[derive(Copy, Clone, Debug)]
+pub enum LayoutError<'tcx> {
+    Unknown(Ty<'tcx>),
+    SizeOverflow(Ty<'tcx>)
+}
 
-                offset = offset.checked_add(field.size, dl)
-                    .ok_or(LayoutError::SizeOverflow(ty))?;
+impl<'tcx> fmt::Display for LayoutError<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+        match *self {
+            LayoutError::Unknown(ty) => {
+                write!(f, "the type `{:?}` has an unknown layout", ty)
             }
-
-            if repr.align > 0 {
-                let repr_align = repr.align as u64;
-                align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
-                debug!("univariant repr_align: {:?}", repr_align);
+            LayoutError::SizeOverflow(ty) => {
+                write!(f, "the type `{:?}` is too big for the current architecture", ty)
             }
+        }
+    }
+}
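A sketch of when each error arises (the array length is illustrative, not a precise bound):

    // Unknown: the layout of a still-generic type, e.g. `T` in
    // `fn f<T>()`, cannot be computed before monomorphization.
    //
    // SizeOverflow: the total size exceeds the target's object-size
    // bound; on a 64-bit target a local of type
    //
    //     [u8; 1 << 62]
    //
    // is rejected with "the type `[u8; ...]` is too big for the
    // current architecture".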
 
-            debug!("univariant min_size: {:?}", offset);
-            let min_size = offset;
-
-            // As stated above, inverse_memory_index holds field indices by increasing offset.
-            // This makes it an already-sorted view of the offsets vec.
-            // To invert it, consider:
-            // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
-            // Field 5 would be the first element, so memory_index is i:
-            // Note: if we didn't optimize, it's already right.
-
-            let mut memory_index;
-            if optimize {
-                memory_index = vec![0; inverse_memory_index.len()];
+impl<'a, 'tcx> Layout {
+    pub fn compute_uncached(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                            param_env: ty::ParamEnv<'tcx>,
+                            ty: Ty<'tcx>)
+                            -> Result<&'tcx Layout, LayoutError<'tcx>> {
+        let success = |layout| Ok(tcx.intern_layout(layout));
+        let dl = &tcx.data_layout;
+        assert!(!ty.has_infer_types());
 
-                for i in 0..inverse_memory_index.len() {
-                    memory_index[inverse_memory_index[i] as usize]  = i as u32;
-                }
+        let ptr_layout = |pointee: Ty<'tcx>| {
+            let non_zero = !ty.is_unsafe_ptr();
+            let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
+            if pointee.is_sized(tcx, param_env, DUMMY_SP) {
+                Ok(Scalar { value: Pointer, non_zero: non_zero })
             } else {
-                memory_index = inverse_memory_index;
-            }
-
-            let size = min_size.abi_align(align);
-            let mut abi = Abi::Aggregate {
-                sized,
-                packed
-            };
-
-            // Unpack newtype ABIs and find scalar pairs.
-            if sized && size.bytes() > 0 {
-                // All other fields must be ZSTs, and we need them to all start at 0.
-                let mut zst_offsets =
-                    offsets.iter().enumerate().filter(|&(i, _)| fields[i].is_zst());
-                if zst_offsets.all(|(_, o)| o.bytes() == 0) {
-                    let mut non_zst_fields =
-                        fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
-                    match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-                        // We have exactly one non-ZST field.
-                        (Some((i, field)), None, None) => {
-                            // Field fills the struct and it has a scalar or scalar pair ABI.
-                            if offsets[i].bytes() == 0 && size == field.size {
-                                match field.abi {
-                                    // For plain scalars we can't unpack newtypes
-                                    // for `#[repr(C)]`, as that affects C ABIs.
-                                    Abi::Scalar(_) if optimize => {
-                                        abi = field.abi.clone();
-                                    }
-                                    // But scalar pairs are Rust-specific and get
-                                    // treated as aggregates by C ABIs anyway.
-                                    Abi::ScalarPair(..) => {
-                                        abi = field.abi.clone();
-                                    }
-                                    _ => {}
-                                }
-                            }
-                        }
-
-                        // Two non-ZST fields, and they're both scalars.
-                        (Some((i, &TyLayout {
-                            details: &LayoutDetails { abi: Abi::Scalar(ref a), .. }, ..
-                        })), Some((j, &TyLayout {
-                            details: &LayoutDetails { abi: Abi::Scalar(ref b), .. }, ..
-                        })), None) => {
-                            // Order by the memory placement, not source order.
-                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                                ((i, a), (j, b))
-                            } else {
-                                ((j, b), (i, a))
-                            };
-                            let pair = scalar_pair(a.clone(), b.clone());
-                            let pair_offsets = match pair.fields {
-                                FieldPlacement::Arbitrary {
-                                    ref offsets,
-                                    ref memory_index
-                                } => {
-                                    assert_eq!(memory_index, &[0, 1]);
-                                    offsets
-                                }
-                                _ => bug!()
-                            };
-                            if offsets[i] == pair_offsets[0] &&
-                               offsets[j] == pair_offsets[1] &&
-                               align == pair.align &&
-                               size == pair.size {
-                                // We can use `ScalarPair` only when it matches our
-                                // already computed layout (including `#[repr(C)]`).
-                                abi = pair.abi;
-                            }
-                        }
-
-                        _ => {}
-                    }
+                let unsized_part = tcx.struct_tail(pointee);
+                match unsized_part.sty {
+                    ty::TySlice(_) | ty::TyStr => Ok(FatPointer {
+                        metadata: Int(dl.ptr_sized_integer()),
+                        non_zero: non_zero
+                    }),
+                    ty::TyDynamic(..) => Ok(FatPointer { metadata: Pointer, non_zero: non_zero }),
+                    ty::TyForeign(..) => Ok(Scalar { value: Pointer, non_zero: non_zero }),
+                    _ => Err(LayoutError::Unknown(unsized_part)),
                 }
             }
-
-            Ok(LayoutDetails {
-                variants: Variants::Single { index: 0 },
-                fields: FieldPlacement::Arbitrary {
-                    offsets,
-                    memory_index
-                },
-                abi,
-                align,
-                size
-            })
-        };
-        let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
-            Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
         };
-        assert!(!ty.has_infer_types());
 
-        Ok(match ty.sty {
+        let layout = match ty.sty {
             // Basic scalars.
-            ty::TyBool => {
-                tcx.intern_layout(LayoutDetails::scalar(cx, Scalar {
-                    value: Int(I8, false),
-                    valid_range: 0..=1
-                }))
-            }
-            ty::TyChar => {
-                tcx.intern_layout(LayoutDetails::scalar(cx, Scalar {
-                    value: Int(I32, false),
-                    valid_range: 0..=0x10FFFF
-                }))
-            }
+            ty::TyBool => Scalar { value: Int(I1), non_zero: false },
+            ty::TyChar => Scalar { value: Int(I32), non_zero: false },
             ty::TyInt(ity) => {
-                scalar(Int(Integer::from_attr(dl, attr::SignedInt(ity)), true))
+                Scalar {
+                    value: Int(Integer::from_attr(dl, attr::SignedInt(ity))),
+                    non_zero: false
+                }
             }
             ty::TyUint(ity) => {
-                scalar(Int(Integer::from_attr(dl, attr::UnsignedInt(ity)), false))
-            }
-            ty::TyFloat(FloatTy::F32) => scalar(F32),
-            ty::TyFloat(FloatTy::F64) => scalar(F64),
-            ty::TyFnPtr(_) => {
-                let mut ptr = scalar_unit(Pointer);
-                ptr.valid_range.start = 1;
-                tcx.intern_layout(LayoutDetails::scalar(cx, ptr))
+                Scalar {
+                    value: Int(Integer::from_attr(dl, attr::UnsignedInt(ity))),
+                    non_zero: false
+                }
             }
+            ty::TyFloat(FloatTy::F32) => Scalar { value: F32, non_zero: false },
+            ty::TyFloat(FloatTy::F64) => Scalar { value: F64, non_zero: false },
+            ty::TyFnPtr(_) => Scalar { value: Pointer, non_zero: true },
 
             // The never type.
-            ty::TyNever => {
-                tcx.intern_layout(LayoutDetails::uninhabited(0))
-            }
+            ty::TyNever => Univariant {
+                variant: Struct::new(dl, &vec![], &ReprOptions::default(),
+                  StructKind::AlwaysSizedUnivariant, ty)?,
+                non_zero: false
+            },
 
             // Potentially-fat pointers.
             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
-                let mut data_ptr = scalar_unit(Pointer);
-                if !ty.is_unsafe_ptr() {
-                    data_ptr.valid_range.start = 1;
-                }
-
-                let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env);
-                if pointee.is_sized(tcx, param_env, DUMMY_SP) {
-                    return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr)));
-                }
-
-                let unsized_part = tcx.struct_tail(pointee);
-                let metadata = match unsized_part.sty {
-                    ty::TyForeign(..) => {
-                        return Ok(tcx.intern_layout(LayoutDetails::scalar(cx, data_ptr)));
-                    }
-                    ty::TySlice(_) | ty::TyStr => {
-                        scalar_unit(Int(dl.ptr_sized_integer(), false))
-                    }
-                    ty::TyDynamic(..) => {
-                        let mut vtable = scalar_unit(Pointer);
-                        vtable.valid_range.start = 1;
-                        vtable
-                    }
-                    _ => return Err(LayoutError::Unknown(unsized_part))
-                };
-
-                // Effectively a (ptr, meta) tuple.
-                tcx.intern_layout(scalar_pair(data_ptr, metadata))
+                ptr_layout(pointee)?
+            }
+            ty::TyAdt(def, _) if def.is_box() => {
+                ptr_layout(ty.boxed_ty())?
             }
 
             // Arrays and slices.
@@ -1229,350 +1198,284 @@ impl<'a, 'tcx> LayoutDetails {
                     }
                 }
 
-                let element = cx.layout_of(element)?;
+                let element = element.layout(tcx, param_env)?;
+                let element_size = element.size(dl);
                 let count = count.val.to_const_int().unwrap().to_u64().unwrap();
-                let size = element.size.checked_mul(count, dl)
-                    .ok_or(LayoutError::SizeOverflow(ty))?;
-
-                tcx.intern_layout(LayoutDetails {
-                    variants: Variants::Single { index: 0 },
-                    fields: FieldPlacement::Array {
-                        stride: element.size,
-                        count
-                    },
-                    abi: Abi::Aggregate {
-                        sized: true,
-                        packed: false
-                    },
-                    align: element.align,
-                    size
-                })
+                if element_size.checked_mul(count, dl).is_none() {
+                    return Err(LayoutError::SizeOverflow(ty));
+                }
+                Array {
+                    sized: true,
+                    align: element.align(dl),
+                    primitive_align: element.primitive_align(dl),
+                    element_size,
+                    count,
+                }
             }
             ty::TySlice(element) => {
-                let element = cx.layout_of(element)?;
-                tcx.intern_layout(LayoutDetails {
-                    variants: Variants::Single { index: 0 },
-                    fields: FieldPlacement::Array {
-                        stride: element.size,
-                        count: 0
-                    },
-                    abi: Abi::Aggregate {
-                        sized: false,
-                        packed: false
-                    },
-                    align: element.align,
-                    size: Size::from_bytes(0)
-                })
+                let element = element.layout(tcx, param_env)?;
+                Array {
+                    sized: false,
+                    align: element.align(dl),
+                    primitive_align: element.primitive_align(dl),
+                    element_size: element.size(dl),
+                    count: 0
+                }
             }
             ty::TyStr => {
-                tcx.intern_layout(LayoutDetails {
-                    variants: Variants::Single { index: 0 },
-                    fields: FieldPlacement::Array {
-                        stride: Size::from_bytes(1),
-                        count: 0
-                    },
-                    abi: Abi::Aggregate {
-                        sized: false,
-                        packed: false
-                    },
+                Array {
+                    sized: false,
                     align: dl.i8_align,
-                    size: Size::from_bytes(0)
-                })
+                    primitive_align: dl.i8_align,
+                    element_size: Size::from_bytes(1),
+                    count: 0
+                }
             }
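In the unsized cases `count` is 0 and the real length lives in the fat pointer, so only `size_of_val` is meaningful:

    fn main() {
        let s: &str = "abcd";
        // element_size is one byte; the dynamic length comes from the
        // fat pointer's metadata.
        assert_eq!(std::mem::size_of_val(s), 4);
    }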
 
             // Odd unit types.
             ty::TyFnDef(..) => {
-                univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?
+                Univariant {
+                    variant: Struct::new(dl, &vec![],
+                      &ReprOptions::default(), StructKind::AlwaysSizedUnivariant, ty)?,
+                    non_zero: false
+                }
             }
             ty::TyDynamic(..) | ty::TyForeign(..) => {
-                let mut unit = univariant_uninterned(&[], &ReprOptions::default(),
-                  StructKind::AlwaysSized)?;
-                match unit.abi {
-                    Abi::Aggregate { ref mut sized, .. } => *sized = false,
-                    _ => bug!()
-                }
-                tcx.intern_layout(unit)
+                let mut unit = Struct::new(dl, &vec![], &ReprOptions::default(),
+                  StructKind::AlwaysSizedUnivariant, ty)?;
+                unit.sized = false;
+                Univariant { variant: unit, non_zero: false }
             }
 
             // Tuples, generators and closures.
             ty::TyGenerator(def_id, ref substs, _) => {
                 let tys = substs.field_tys(def_id, tcx);
-                univariant(&tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+                let st = Struct::new(dl,
+                    &tys.map(|ty| ty.layout(tcx, param_env))
+                      .collect::<Result<Vec<_>, _>>()?,
                     &ReprOptions::default(),
-                    StructKind::AlwaysSized)?
+                    StructKind::AlwaysSizedUnivariant, ty)?;
+                Univariant { variant: st, non_zero: false }
             }
 
             ty::TyClosure(def_id, ref substs) => {
                 let tys = substs.upvar_tys(def_id, tcx);
-                univariant(&tys.map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+                let st = Struct::new(dl,
+                    &tys.map(|ty| ty.layout(tcx, param_env))
+                      .collect::<Result<Vec<_>, _>>()?,
                     &ReprOptions::default(),
-                    StructKind::AlwaysSized)?
+                    StructKind::AlwaysSizedUnivariant, ty)?;
+                Univariant { variant: st, non_zero: false }
             }
 
             ty::TyTuple(tys, _) => {
                 let kind = if tys.len() == 0 {
-                    StructKind::AlwaysSized
+                    StructKind::AlwaysSizedUnivariant
                 } else {
-                    StructKind::MaybeUnsized
+                    StructKind::MaybeUnsizedUnivariant
                 };
 
-                univariant(&tys.iter().map(|ty| cx.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
-                    &ReprOptions::default(), kind)?
+                let st = Struct::new(dl,
+                    &tys.iter().map(|ty| ty.layout(tcx, param_env))
+                      .collect::<Result<Vec<_>, _>>()?,
+                    &ReprOptions::default(), kind, ty)?;
+                Univariant { variant: st, non_zero: false }
             }
 
             // SIMD vector types.
             ty::TyAdt(def, ..) if def.repr.simd() => {
-                let count = ty.simd_size(tcx) as u64;
-                let element = cx.layout_of(ty.simd_type(tcx))?;
-                match element.abi {
-                    Abi::Scalar(_) => {}
+                let element = ty.simd_type(tcx);
+                match *element.layout(tcx, param_env)? {
+                    Scalar { value, .. } => {
+                        return success(Vector {
+                            element: value,
+                            count: ty.simd_size(tcx) as u64
+                        });
+                    }
                     _ => {
                         tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
                                                 a non-machine element type `{}`",
-                                                ty, element.ty));
+                                                ty, element));
                     }
                 }
-                let size = element.size.checked_mul(count, dl)
-                    .ok_or(LayoutError::SizeOverflow(ty))?;
-                let align = dl.vector_align(size);
-                let size = size.abi_align(align);
-
-                tcx.intern_layout(LayoutDetails {
-                    variants: Variants::Single { index: 0 },
-                    fields: FieldPlacement::Array {
-                        stride: element.size,
-                        count
-                    },
-                    abi: Abi::Vector,
-                    size,
-                    align,
-                })
             }
 
             // ADTs.
             ty::TyAdt(def, substs) => {
-                // Cache the field layouts.
-                let variants = def.variants.iter().map(|v| {
-                    v.fields.iter().map(|field| {
-                        cx.layout_of(field.ty(tcx, substs))
-                    }).collect::<Result<Vec<_>, _>>()
-                }).collect::<Result<Vec<_>, _>>()?;
+                if def.variants.is_empty() {
+                    // Uninhabited; represent as unit
+                    // (Typechecking will reject discriminant-sizing attrs.)
 
-                let (inh_first, inh_second) = {
-                    let mut inh_variants = (0..variants.len()).filter(|&v| {
-                        variants[v].iter().all(|f| f.abi != Abi::Uninhabited)
+                    return success(Univariant {
+                        variant: Struct::new(dl, &vec![],
+                          &def.repr, StructKind::AlwaysSizedUnivariant, ty)?,
+                        non_zero: false
                     });
-                    (inh_variants.next(), inh_variants.next())
-                };
-                if inh_first.is_none() {
-                    // Uninhabited because it has no variants, or only uninhabited ones.
-                    return Ok(tcx.intern_layout(LayoutDetails::uninhabited(0)));
                 }
 
-                if def.is_union() {
-                    let packed = def.repr.packed();
-                    if packed && def.repr.align > 0 {
-                        bug!("Union cannot be packed and aligned");
+                if def.is_enum() && def.variants.iter().all(|v| v.fields.is_empty()) {
+                    // All bodies empty -> intlike
+                    let (mut min, mut max, mut non_zero) = (i64::max_value(),
+                                                            i64::min_value(),
+                                                            true);
+                    for discr in def.discriminants(tcx) {
+                        let x = discr.to_u128_unchecked() as i64;
+                        if x == 0 { non_zero = false; }
+                        if x < min { min = x; }
+                        if x > max { max = x; }
                     }
 
-                    let mut align = if def.repr.packed() {
-                        dl.i8_align
-                    } else {
-                        dl.aggregate_align
-                    };
-
-                    if def.repr.align > 0 {
-                        let repr_align = def.repr.align as u64;
-                        align = align.max(
-                            Align::from_bytes(repr_align, repr_align).unwrap());
-                    }
-
-                    let mut size = Size::from_bytes(0);
-                    for field in &variants[0] {
-                        assert!(!field.is_unsized());
-
-                        if !packed {
-                            align = align.max(field.align);
-                        }
-                        size = cmp::max(size, field.size);
-                    }
-
-                    return Ok(tcx.intern_layout(LayoutDetails {
-                        variants: Variants::Single { index: 0 },
-                        fields: FieldPlacement::Union(variants[0].len()),
-                        abi: Abi::Aggregate {
-                            sized: true,
-                            packed
-                        },
-                        align,
-                        size: size.abi_align(align)
-                    }));
+                    // FIXME: should handle i128? signed-value based impl is weird and hard to
+                    // grok.
+                    let (discr, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
+                    return success(CEnum {
+                        discr,
+                        signed,
+                        non_zero,
+                        // FIXME: should be u128?
+                        min: min as u64,
+                        max: max as u64
+                    });
                 }
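`Integer::repr_discr` picks the smallest integer that fits the discriminant range unless an explicit `repr` attribute forces a wider one; both behaviors are observable from stable Rust:

    use std::mem::size_of;

    #[allow(dead_code)]
    enum Small { A, B, C }      // 0..=2 fits in one byte

    #[allow(dead_code)]
    #[repr(u16)]
    enum Forced { A }           // the repr attribute wins

    fn main() {
        assert_eq!(size_of::<Small>(), 1);
        assert_eq!(size_of::<Forced>(), 2);
    }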
 
-                let is_struct = !def.is_enum() ||
-                    // Only one variant is inhabited.
-                    (inh_second.is_none() &&
-                    // Representation optimizations are allowed.
-                     !def.repr.inhibit_enum_layout_opt() &&
-                    // Inhabited variant either has data ...
-                     (!variants[inh_first.unwrap()].is_empty() ||
-                    // ... or there other, uninhabited, variants.
-                      variants.len() > 1));
-                if is_struct {
-                    // Struct, or univariant enum equivalent to a struct.
+                if !def.is_enum() || (def.variants.len() == 1 &&
+                                      !def.repr.inhibit_enum_layout_opt()) {
+                    // Struct, or union, or univariant enum equivalent to a struct.
                     // (Typechecking will reject discriminant-sizing attrs.)
 
-                    let v = inh_first.unwrap();
-                    let kind = if def.is_enum() || variants[v].len() == 0 {
-                        StructKind::AlwaysSized
+                    let kind = if def.is_enum() || def.variants[0].fields.is_empty() {
+                        StructKind::AlwaysSizedUnivariant
                     } else {
                         let param_env = tcx.param_env(def.did);
-                        let last_field = def.variants[v].fields.last().unwrap();
+                        let last_field = def.variants[0].fields.last().unwrap();
                         let always_sized = tcx.type_of(last_field.did)
                           .is_sized(tcx, param_env, DUMMY_SP);
-                        if !always_sized { StructKind::MaybeUnsized }
-                        else { StructKind::AlwaysSized }
+                        if !always_sized { StructKind::MaybeUnsizedUnivariant }
+                        else { StructKind::AlwaysSizedUnivariant }
                     };
 
-                    let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
-                    st.variants = Variants::Single { index: v };
-                    // Exclude 0 from the range of a newtype ABI NonZero<T>.
-                    if Some(def.did) == cx.tcx().lang_items().non_zero() {
-                        match st.abi {
-                            Abi::Scalar(ref mut scalar) |
-                            Abi::ScalarPair(ref mut scalar, _) => {
-                                if scalar.valid_range.start == 0 {
-                                    scalar.valid_range.start = 1;
-                                }
-                            }
-                            _ => {}
-                        }
-                    }
-                    return Ok(tcx.intern_layout(st));
+                    let fields = def.variants[0].fields.iter().map(|field| {
+                        field.ty(tcx, substs).layout(tcx, param_env)
+                    }).collect::<Result<Vec<_>, _>>()?;
+                    let layout = if def.is_union() {
+                        let mut un = Union::new(dl, &def.repr);
+                        un.extend(dl, fields.iter().map(|&f| Ok(f)), ty)?;
+                        UntaggedUnion { variants: un }
+                    } else {
+                        let st = Struct::new(dl, &fields, &def.repr,
+                          kind, ty)?;
+                        let non_zero = Some(def.did) == tcx.lang_items().non_zero();
+                        Univariant { variant: st, non_zero: non_zero }
+                    };
+                    return success(layout);
                 }
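The `non_zero` flag set from the NonZero lang item is what lets `Option` of such a wrapper stay pointer-sized; a check using today's stable `NonZeroUsize`, a descendant of the same lang item:

    use std::mem::size_of;
    use std::num::NonZeroUsize;

    fn main() {
        // Zero is excluded from the valid values, so `None` reuses it.
        assert_eq!(size_of::<Option<NonZeroUsize>>(), size_of::<usize>());
    }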
 
-                let no_explicit_discriminants = def.variants.iter().enumerate()
-                    .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i));
-
-                // Niche-filling enum optimization.
-                if !def.repr.inhibit_enum_layout_opt() && no_explicit_discriminants {
-                    let mut dataful_variant = None;
-                    let mut niche_variants = usize::max_value()..=0;
-
-                    // Find one non-ZST variant.
-                    'variants: for (v, fields) in variants.iter().enumerate() {
-                        for f in fields {
-                            if f.abi == Abi::Uninhabited {
-                                continue 'variants;
-                            }
-                            if !f.is_zst() {
-                                if dataful_variant.is_none() {
-                                    dataful_variant = Some(v);
-                                    continue 'variants;
-                                } else {
-                                    dataful_variant = None;
-                                    break 'variants;
-                                }
-                            }
-                        }
-                        if niche_variants.start > v {
-                            niche_variants.start = v;
-                        }
-                        niche_variants.end = v;
-                    }
-
-                    if niche_variants.start > niche_variants.end {
-                        dataful_variant = None;
+                // Since there's at least one non-empty body, explicit
+                // discriminants should have been rejected by a checker
+                // before this point.
+                for (i, v) in def.variants.iter().enumerate() {
+                    if v.discr != ty::VariantDiscr::Relative(i) {
+                        bug!("non-C-like enum {} with specified discriminants",
+                            tcx.item_path_str(def.did));
                     }
+                }
 
-                    if let Some(i) = dataful_variant {
-                        let count = (niche_variants.end - niche_variants.start + 1) as u128;
-                        for (field_index, field) in variants[i].iter().enumerate() {
-                            let (offset, niche, niche_start) =
-                                match field.find_niche(cx, count)? {
-                                    Some(niche) => niche,
-                                    None => continue
-                                };
-                            let st = variants.iter().enumerate().map(|(j, v)| {
-                                let mut st = univariant_uninterned(v,
-                                    &def.repr, StructKind::AlwaysSized)?;
-                                st.variants = Variants::Single { index: j };
-                                Ok(st)
-                            }).collect::<Result<Vec<_>, _>>()?;
-
-                            let offset = st[i].fields.offset(field_index) + offset;
-                            let LayoutDetails { size, mut align, .. } = st[i];
-
-                            let mut niche_align = niche.value.align(dl);
-                            let abi = if offset.bytes() == 0 && niche.value.size(dl) == size {
-                                Abi::Scalar(niche.clone())
-                            } else {
-                                let mut packed = st[i].abi.is_packed();
-                                if offset.abi_align(niche_align) != offset {
-                                    packed = true;
-                                    niche_align = dl.i8_align;
-                                }
-                                Abi::Aggregate {
-                                    sized: true,
-                                    packed
-                                }
+                // Cache the substituted and normalized variant field types.
+                let variants = def.variants.iter().map(|v| {
+                    v.fields.iter().map(|field| field.ty(tcx, substs)).collect::<Vec<_>>()
+                }).collect::<Vec<_>>();
+
+                if variants.len() == 2 && !def.repr.inhibit_enum_layout_opt() {
+                    // Nullable pointer optimization
+                    for discr in 0..2 {
+                        let other_fields = variants[1 - discr].iter().map(|ty| {
+                            ty.layout(tcx, param_env)
+                        });
+                        if !Struct::would_be_zero_sized(dl, other_fields)? {
+                            continue;
+                        }
+                        let paths = Struct::non_zero_field_paths(tcx,
+                                                                 param_env,
+                                                                 variants[discr].iter().cloned(),
+                                                                 None)?;
+                        let (mut path, mut path_source) = if let Some(p) = paths { p }
+                          else { continue };
+
+                        // FIXME(eddyb) should take advantage of a newtype.
+                        if path == &[0] && variants[discr].len() == 1 {
+                            let value = match *variants[discr][0].layout(tcx, param_env)? {
+                                Scalar { value, .. } => value,
+                                CEnum { discr, .. } => Int(discr),
+                                _ => bug!("Layout::compute: `{}`'s non-zero \
+                                           `{}` field not scalar?!",
+                                           ty, variants[discr][0])
                             };
-                            align = align.max(niche_align);
-
-                            return Ok(tcx.intern_layout(LayoutDetails {
-                                variants: Variants::NicheFilling {
-                                    dataful_variant: i,
-                                    niche_variants,
-                                    niche,
-                                    niche_start,
-                                    variants: st,
-                                },
-                                fields: FieldPlacement::Arbitrary {
-                                    offsets: vec![offset],
-                                    memory_index: vec![0]
-                                },
-                                abi,
-                                size,
-                                align,
-                            }));
+                            return success(RawNullablePointer {
+                                nndiscr: discr as u64,
+                                value,
+                            });
                         }
-                    }
-                }
 
-                let (mut min, mut max) = (i128::max_value(), i128::min_value());
-                for (i, discr) in def.discriminants(tcx).enumerate() {
-                    if variants[i].iter().any(|f| f.abi == Abi::Uninhabited) {
-                        continue;
+                        let st = Struct::new(dl,
+                            &variants[discr].iter().map(|ty| ty.layout(tcx, param_env))
+                              .collect::<Result<Vec<_>, _>>()?,
+                            &def.repr, StructKind::AlwaysSizedUnivariant, ty)?;
+
+                        // We have to fix the last element of path here.
+                        let mut i = *path.last().unwrap();
+                        i = st.memory_index[i as usize];
+                        *path.last_mut().unwrap() = i;
+                        path.push(0); // For GEP through a pointer.
+                        path.reverse();
+                        path_source.push(0);
+                        path_source.reverse();
+
+                        return success(StructWrappedNullablePointer {
+                            nndiscr: discr as u64,
+                            nonnull: st,
+                            discrfield: path,
+                            discrfield_source: path_source
+                        });
                     }
-                    let x = discr.to_u128_unchecked() as i128;
-                    if x < min { min = x; }
-                    if x > max { max = x; }
                 }
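A hypothetical shape that takes the StructWrappedNullablePointer path (names illustrative): the zero-sized variant is folded away, and `discrfield`, after the push(0)/reverse above, points through the enum at the non-null field inside the payload:

    use std::mem::size_of;

    #[allow(dead_code)]
    struct Wrap<'a> { tag: u8, ptr: &'a u8 }

    #[allow(dead_code)]
    enum E<'a> { Empty, Full(Wrap<'a>) }

    fn main() {
        // `Empty` is zero-sized and `Full` contains a non-null field,
        // so no separate discriminant is stored.
        assert_eq!(size_of::<E<'static>>(), size_of::<Wrap<'static>>());
    }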
-                assert!(min <= max, "discriminant range is {}...{}", min, max);
-                let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
 
+                // The general case.
+                let discr_max = (variants.len() - 1) as i64;
+                assert!(discr_max >= 0);
+                let (min_ity, _) = Integer::repr_discr(tcx, ty, &def.repr, 0, discr_max);
                 let mut align = dl.aggregate_align;
+                let mut primitive_align = dl.aggregate_align;
                 let mut size = Size::from_bytes(0);
 
                 // We're interested in the smallest alignment, so start large.
                 let mut start_align = Align::from_bytes(256, 256).unwrap();
-                assert_eq!(Integer::for_abi_align(dl, start_align), None);
 
-                // Create the set of structs that represent each variant.
-                let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| {
-                    let mut st = univariant_uninterned(&field_layouts,
-                        &def.repr, StructKind::EnumVariant(min_ity))?;
-                    st.variants = Variants::Single { index: i };
+                // Create the set of structs that represent each variant
+                // Use the minimum integer type we figured out above
+                let discr = Scalar { value: Int(min_ity), non_zero: false };
+                let mut variants = variants.into_iter().map(|fields| {
+                    let mut fields = fields.into_iter().map(|field| {
+                        field.layout(tcx, param_env)
+                    }).collect::<Result<Vec<_>, _>>()?;
+                    fields.insert(0, &discr);
+                    let st = Struct::new(dl,
+                        &fields,
+                        &def.repr, StructKind::EnumVariant, ty)?;
                     // Find the first field we can't move later
                     // to make room for a larger discriminant.
-                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
-                        if !field.is_zst() || field.align.abi() != 1 {
-                            start_align = start_align.min(field.align);
+                    // It is important to skip the first field.
+                    for i in st.field_index_by_increasing_offset().skip(1) {
+                        let field = fields[i];
+                        let field_align = field.align(dl);
+                        if field.size(dl).bytes() != 0 || field_align.abi() != 1 {
+                            start_align = start_align.min(field_align);
                             break;
                         }
                     }
-                    size = cmp::max(size, st.size);
+                    size = cmp::max(size, st.min_size);
                     align = align.max(st.align);
+                    primitive_align = primitive_align.max(st.primitive_align);
                     Ok(st)
                 }).collect::<Result<Vec<_>, _>>()?;
 
@@ -1618,55 +1521,30 @@ impl<'a, 'tcx> LayoutDetails {
                     ity = min_ity;
                 } else {
                     // Patch up the variants' first few fields.
-                    let old_ity_size = min_ity.size();
-                    let new_ity_size = ity.size();
+                    let old_ity_size = Int(min_ity).size(dl);
+                    let new_ity_size = Int(ity).size(dl);
                     for variant in &mut variants {
-                        if variant.abi == Abi::Uninhabited {
-                            continue;
-                        }
-                        match variant.fields {
-                            FieldPlacement::Arbitrary { ref mut offsets, .. } => {
-                                for i in offsets {
-                                    if *i <= old_ity_size {
-                                        assert_eq!(*i, old_ity_size);
-                                        *i = new_ity_size;
-                                    }
-                                }
-                                // We might be making the struct larger.
-                                if variant.size <= old_ity_size {
-                                    variant.size = new_ity_size;
-                                }
+                        for i in variant.offsets.iter_mut() {
+                            // The first field is the discriminant, at offset 0.
+                            // The offsets aren't in source order, so skip the
+                            // discriminant by its offset value, not by position.
+                            if *i <= old_ity_size && *i > Size::from_bytes(0) {
+                                *i = new_ity_size;
                             }
-                            _ => bug!()
+                        }
+                        // We might be making the struct larger.
+                        if variant.min_size <= old_ity_size {
+                            variant.min_size = new_ity_size;
                         }
                     }
                 }
 
-                let discr = Scalar {
-                    value: Int(ity, signed),
-                    valid_range: (min as u128)..=(max as u128)
-                };
-                let abi = if discr.value.size(dl) == size {
-                    Abi::Scalar(discr.clone())
-                } else {
-                    Abi::Aggregate {
-                        sized: true,
-                        packed: false
-                    }
-                };
-                tcx.intern_layout(LayoutDetails {
-                    variants: Variants::Tagged {
-                        discr,
-                        variants
-                    },
-                    // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results
-                    // in lost optimizations, specifically around allocations, see
-                    // `test/codegen/{alloc-optimisation,vec-optimizes-away}.rs`.
-                    fields: FieldPlacement::Union(1),
-                    abi,
+                General {
+                    discr: ity,
+                    variants,
+                    size,
                     align,
-                    size
-                })
+                    primitive_align,
+                }
             }
 
             // Types with no meaningful known layout.
@@ -1675,24 +1553,204 @@ impl<'a, 'tcx> LayoutDetails {
                 if ty == normalized {
                     return Err(LayoutError::Unknown(ty));
                 }
-                tcx.layout_raw(param_env.and(normalized))?
+                return normalized.layout(tcx, param_env);
             }
             ty::TyParam(_) => {
                 return Err(LayoutError::Unknown(ty));
             }
             ty::TyInfer(_) | ty::TyError => {
-                bug!("LayoutDetails::compute: unexpected type `{}`", ty)
+                bug!("Layout::compute: unexpected type `{}`", ty)
             }
-        })
+        };
+
+        success(layout)
+    }
+
+    /// Returns true if the layout corresponds to an unsized type.
+    pub fn is_unsized(&self) -> bool {
+        match *self {
+            Scalar {..} | Vector {..} | FatPointer {..} |
+            CEnum {..} | UntaggedUnion {..} | General {..} |
+            RawNullablePointer {..} |
+            StructWrappedNullablePointer {..} => false,
+
+            Array { sized, .. } |
+            Univariant { variant: Struct { sized, .. }, .. } => !sized
+        }
+    }
+
+    pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
+        let dl = cx.data_layout();
+
+        match *self {
+            Scalar { value, .. } | RawNullablePointer { value, .. } => {
+                value.size(dl)
+            }
+
+            Vector { element, count } => {
+                let element_size = element.size(dl);
+                let vec_size = match element_size.checked_mul(count, dl) {
+                    Some(size) => size,
+                    None => bug!("Layout::size({:?}): {} * {} overflowed",
+                                 self, element_size.bytes(), count)
+                };
+                vec_size.abi_align(self.align(dl))
+            }
+
+            Array { element_size, count, .. } => {
+                match element_size.checked_mul(count, dl) {
+                    Some(size) => size,
+                    None => bug!("Layout::size({:?}): {} * {} overflowed",
+                                 self, element_size.bytes(), count)
+                }
+            }
+
+            FatPointer { metadata, .. } => {
+                // Effectively a (ptr, meta) tuple.
+                Pointer.size(dl).abi_align(metadata.align(dl))
+                       .checked_add(metadata.size(dl), dl).unwrap()
+                       .abi_align(self.align(dl))
+            }
+
+            CEnum { discr, .. } => Int(discr).size(dl),
+            General { size, .. } => size,
+            UntaggedUnion { ref variants } => variants.stride(),
+
+            Univariant { ref variant, .. } |
+            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+                variant.stride()
+            }
+        }
+    }
+
+    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+        let dl = cx.data_layout();
+
+        match *self {
+            Scalar { value, .. } | RawNullablePointer { value, .. } => {
+                value.align(dl)
+            }
+
+            Vector { element, count } => {
+                let elem_size = element.size(dl);
+                let vec_size = match elem_size.checked_mul(count, dl) {
+                    Some(size) => size,
+                    None => bug!("Layout::align({:?}): {} * {} overflowed",
+                                 self, elem_size.bytes(), count)
+                };
+                for &(size, align) in &dl.vector_align {
+                    if size == vec_size {
+                        return align;
+                    }
+                }
+                // Default to natural alignment, which is what LLVM does.
+                // That is, use the size, rounded up to a power of 2.
+                let align = vec_size.bytes().next_power_of_two();
+                Align::from_bytes(align, align).unwrap()
+            }
+
+            FatPointer { metadata, .. } => {
+                // Effectively a (ptr, meta) tuple.
+                Pointer.align(dl).max(metadata.align(dl))
+            }
+
+            CEnum { discr, .. } => Int(discr).align(dl),
+            Array { align, .. } | General { align, .. } => align,
+            UntaggedUnion { ref variants } => variants.align,
+
+            Univariant { ref variant, .. } |
+            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+                variant.align
+            }
+        }
+    }
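As a worked example of the fallback: a hypothetical 3 x f32 vector occupies 12 bytes; with no 96-bit entry in `vector_align`, the alignment becomes `12u64.next_power_of_two()` = 16, and `size` above then rounds the 12 bytes up to 16.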
+
+    /// Returns the alignment before any `repr(align)` attribute is applied.
+    pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align {
+        match *self {
+            Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align,
+            Univariant { ref variant, .. } |
+            StructWrappedNullablePointer { nonnull: ref variant, .. } => {
+                variant.primitive_align
+            },
+
+            _ => self.align(dl)
+        }
+    }
+
+    /// Returns repr alignment if it is greater than the primitive alignment.
+    pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> {
+        let align = self.align(dl);
+        let primitive_align = self.primitive_align(dl);
+        if align.abi() > primitive_align.abi() {
+            Some(align.abi() as u32)
+        } else {
+            None
+        }
+    }
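`over_align` fires exactly when a `repr(align)` attribute raised the alignment above the natural one; observable on stable Rust:

    use std::mem::align_of;

    #[repr(align(16))]
    #[allow(dead_code)]
    struct Padded(u8);

    fn main() {
        // primitive_align of the u8 payload is 1; repr raises align to
        // 16, so over_align would report Some(16).
        assert_eq!(align_of::<Padded>(), 16);
    }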
+
+    pub fn field_offset<C: HasDataLayout>(&self,
+                                          cx: C,
+                                          i: usize,
+                                          variant_index: Option<usize>)
+                                          -> Size {
+        let dl = cx.data_layout();
+
+        match *self {
+            Scalar { .. } |
+            CEnum { .. } |
+            UntaggedUnion { .. } |
+            RawNullablePointer { .. } => {
+                Size::from_bytes(0)
+            }
+
+            Vector { element, count } => {
+                let element_size = element.size(dl);
+                let i = i as u64;
+                assert!(i < count);
+                // Element `i` starts after `i` full strides.
+                Size::from_bytes(element_size.bytes() * i)
+            }
+
+            Array { element_size, count, .. } => {
+                let i = i as u64;
+                assert!(i < count);
+                // As above: the offset of element `i` is `i` strides in.
+                Size::from_bytes(element_size.bytes() * i)
+            }
+
+            FatPointer { metadata, .. } => {
+                // Effectively a (ptr, meta) tuple.
+                assert!(i < 2);
+                if i == 0 {
+                    Size::from_bytes(0)
+                } else {
+                    Pointer.size(dl).abi_align(metadata.align(dl))
+                }
+            }
+
+            Univariant { ref variant, .. } => variant.offsets[i],
+
+            General { ref variants, .. } => {
+                let v = variant_index.expect("variant index required");
+                variants[v].offsets[i + 1]
+            }
+
+            StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
+                if Some(nndiscr as usize) == variant_index {
+                    nonnull.offsets[i]
+                } else {
+                    Size::from_bytes(0)
+                }
+            }
+        }
     }
 
     /// This is invoked by the `layout_raw` query to record the final
     /// layout of each type.
     #[inline]
-    fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
-                                  ty: Ty<'tcx>,
-                                  param_env: ty::ParamEnv<'tcx>,
-                                  layout: TyLayout<'tcx>) {
+    pub fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                                      ty: Ty<'tcx>,
+                                      param_env: ty::ParamEnv<'tcx>,
+                                      layout: &Layout) {
         // If we are running with `-Zprint-type-sizes`, record layouts for
         // dumping later. Ignore layouts that are done with non-empty
         // environments or non-monomorphic layouts, as the user only wants
@@ -1712,23 +1770,24 @@ impl<'a, 'tcx> LayoutDetails {
     fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                            ty: Ty<'tcx>,
                                            param_env: ty::ParamEnv<'tcx>,
-                                           layout: TyLayout<'tcx>) {
-        let cx = (tcx, param_env);
+                                           layout: &Layout) {
         // (delay format until we actually need it)
         let record = |kind, opt_discr_size, variants| {
             let type_desc = format!("{:?}", ty);
+            let overall_size = layout.size(tcx);
+            let align = layout.align(tcx);
             tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                               type_desc,
-                                                              layout.align,
-                                                              layout.size,
+                                                              align,
+                                                              overall_size,
                                                               opt_discr_size,
                                                               variants);
         };
 
-        let adt_def = match ty.sty {
-            ty::TyAdt(ref adt_def, _) => {
+        let (adt_def, substs) = match ty.sty {
+            ty::TyAdt(ref adt_def, substs) => {
                 debug!("print-type-size t: `{:?}` process adt", ty);
-                adt_def
+                (adt_def, substs)
             }
 
             ty::TyClosure(..) => {
@@ -1745,61 +1804,106 @@ impl<'a, 'tcx> LayoutDetails {
 
         let adt_kind = adt_def.adt_kind();
 
-        let build_variant_info = |n: Option<ast::Name>,
-                                  flds: &[ast::Name],
-                                  layout: TyLayout<'tcx>| {
-            let mut min_size = Size::from_bytes(0);
-            let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
-                match layout.field(cx, i) {
-                    Err(err) => {
-                        bug!("no layout found for field {}: `{:?}`", name, err);
-                    }
-                    Ok(field_layout) => {
-                        let offset = layout.fields.offset(i);
-                        let field_end = offset + field_layout.size;
-                        if min_size < field_end {
-                            min_size = field_end;
-                        }
-                        session::FieldInfo {
-                            name: name.to_string(),
-                            offset: offset.bytes(),
-                            size: field_layout.size.bytes(),
-                            align: field_layout.align.abi(),
-                        }
+        let build_field_info = |(field_name, field_ty): (ast::Name, Ty<'tcx>), offset: &Size| {
+            let layout = field_ty.layout(tcx, param_env);
+            match layout {
+                Err(_) => bug!("no layout found for field {} type: `{:?}`", field_name, field_ty),
+                Ok(field_layout) => {
+                    session::FieldInfo {
+                        name: field_name.to_string(),
+                        offset: offset.bytes(),
+                        size: field_layout.size(tcx).bytes(),
+                        align: field_layout.align(tcx).abi(),
                     }
                 }
-            }).collect();
+            }
+        };
+
+        let build_primitive_info = |name: ast::Name, value: &Primitive| {
+            session::VariantInfo {
+                name: Some(name.to_string()),
+                kind: session::SizeKind::Exact,
+                align: value.align(tcx).abi(),
+                size: value.size(tcx).bytes(),
+                fields: vec![],
+            }
+        };
+
+        enum Fields<'a> {
+            WithDiscrim(&'a Struct),
+            NoDiscrim(&'a Struct),
+        }
+
+        let build_variant_info = |n: Option<ast::Name>,
+                                  flds: &[(ast::Name, Ty<'tcx>)],
+                                  layout: Fields| {
+            let (s, field_offsets) = match layout {
+                Fields::WithDiscrim(s) => (s, &s.offsets[1..]),
+                Fields::NoDiscrim(s) => (s, &s.offsets[0..]),
+            };
+            let field_info: Vec<_> =
+                flds.iter()
+                    .zip(field_offsets.iter())
+                    .map(|(&field_name_ty, offset)| build_field_info(field_name_ty, offset))
+                    .collect();
 
             session::VariantInfo {
                 name: n.map(|n|n.to_string()),
-                kind: if layout.is_unsized() {
-                    session::SizeKind::Min
-                } else {
+                kind: if s.sized {
                     session::SizeKind::Exact
-                },
-                align: layout.align.abi(),
-                size: if min_size.bytes() == 0 {
-                    layout.size.bytes()
                 } else {
-                    min_size.bytes()
+                    session::SizeKind::Min
                 },
+                align: s.align.abi(),
+                size: s.min_size.bytes(),
                 fields: field_info,
             }
         };
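The variant records built here feed `-Zprint-type-sizes`; a run such as `rustc -Zprint-type-sizes foo.rs` prints lines of roughly the form `print-type-size type: `E`: 16 bytes, alignment: 8 bytes`, followed by per-variant and per-field breakdowns assembled from these `VariantInfo`s.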
 
-        match layout.variants {
-            Variants::Single { index } => {
-                debug!("print-type-size `{:#?}` variant {}",
-                       layout, adt_def.variants[index].name);
-                if !adt_def.variants.is_empty() {
-                    let variant_def = &adt_def.variants[index];
+        match *layout {
+            Layout::StructWrappedNullablePointer { nonnull: ref variant_layout,
+                                                   nndiscr,
+                                                   discrfield: _,
+                                                   discrfield_source: _ } => {
+                debug!("print-type-size t: `{:?}` adt struct-wrapped nullable nndiscr {} is {:?}",
+                       ty, nndiscr, variant_layout);
+                let variant_def = &adt_def.variants[nndiscr as usize];
+                let fields: Vec<_> =
+                    variant_def.fields.iter()
+                                      .map(|field_def| (field_def.name, field_def.ty(tcx, substs)))
+                                      .collect();
+                record(adt_kind.into(),
+                       None,
+                       vec![build_variant_info(Some(variant_def.name),
+                                               &fields,
+                                               Fields::NoDiscrim(variant_layout))]);
+            }
+            Layout::RawNullablePointer { nndiscr, value } => {
+                debug!("print-type-size t: `{:?}` adt raw nullable nndiscr {} is {:?}",
+                       ty, nndiscr, value);
+                let variant_def = &adt_def.variants[nndiscr as usize];
+                record(adt_kind.into(), None,
+                       vec![build_primitive_info(variant_def.name, &value)]);
+            }
+            Layout::Univariant { variant: ref variant_layout, non_zero: _ } => {
+                let variant_names = || {
+                    adt_def.variants.iter().map(|v|format!("{}", v.name)).collect::<Vec<_>>()
+                };
+                debug!("print-type-size t: `{:?}` adt univariant {:?} variants: {:?}",
+                       ty, variant_layout, variant_names());
+                assert!(adt_def.variants.len() <= 1,
+                        "univariant with variants {:?}", variant_names());
+                if adt_def.variants.len() == 1 {
+                    let variant_def = &adt_def.variants[0];
                     let fields: Vec<_> =
-                        variant_def.fields.iter().map(|f| f.name).collect();
+                        variant_def.fields.iter()
+                                          .map(|f| (f.name, f.ty(tcx, substs)))
+                                          .collect();
                     record(adt_kind.into(),
                            None,
                            vec![build_variant_info(Some(variant_def.name),
                                                    &fields,
-                                                   layout)]);
+                                                   Fields::NoDiscrim(variant_layout))]);
                 } else {
                    // (This case arises for *empty* enums, so give it
                    // zero variants.)
@@ -1807,23 +1911,54 @@ impl<'a, 'tcx> LayoutDetails {
                 }
             }
 
-            Variants::NicheFilling { .. } |
-            Variants::Tagged { .. } => {
-                debug!("print-type-size `{:#?}` adt general variants def {}",
-                       ty, adt_def.variants.len());
+            Layout::General { ref variants, discr, .. } => {
+                debug!("print-type-size t: `{:?}` adt general variants def {} layouts {} {:?}",
+                       ty, adt_def.variants.len(), variants.len(), variants);
                 let variant_infos: Vec<_> =
-                    adt_def.variants.iter().enumerate().map(|(i, variant_def)| {
-                        let fields: Vec<_> =
-                            variant_def.fields.iter().map(|f| f.name).collect();
-                        build_variant_info(Some(variant_def.name),
-                                            &fields,
-                                            layout.for_variant(cx, i))
-                    })
-                    .collect();
-                record(adt_kind.into(), match layout.variants {
-                    Variants::Tagged { ref discr, .. } => Some(discr.value.size(tcx)),
-                    _ => None
-                }, variant_infos);
+                    adt_def.variants.iter()
+                                    .zip(variants.iter())
+                                    .map(|(variant_def, variant_layout)| {
+                                        let fields: Vec<_> =
+                                            variant_def.fields
+                                                       .iter()
+                                                       .map(|f| (f.name, f.ty(tcx, substs)))
+                                                       .collect();
+                                        build_variant_info(Some(variant_def.name),
+                                                           &fields,
+                                                           Fields::WithDiscrim(variant_layout))
+                                    })
+                                    .collect();
+                record(adt_kind.into(), Some(discr.size()), variant_infos);
+            }
+
+            Layout::UntaggedUnion { ref variants } => {
+                debug!("print-type-size t: `{:?}` adt union variants {:?}",
+                       ty, variants);
+                // layout does not currently store info about each
+                // variant...
+                record(adt_kind.into(), None, Vec::new());
+            }
+
+            Layout::CEnum { discr, .. } => {
+                debug!("print-type-size t: `{:?}` adt c-like enum", ty);
+                let variant_infos: Vec<_> =
+                    adt_def.variants.iter()
+                                    .map(|variant_def| {
+                                        build_primitive_info(variant_def.name,
+                                                             &Primitive::Int(discr))
+                                    })
+                                    .collect();
+                record(adt_kind.into(), Some(discr.size()), variant_infos);
+            }
+
+            // The remaining cases provide little size info of interest
+            // (i.e. adjustable via representation tweaks) beyond the
+            // total size.
+            Layout::Scalar { .. } |
+            Layout::Vector { .. } |
+            Layout::Array { .. } |
+            Layout::FatPointer { .. } => {
+                debug!("print-type-size t: `{:?}` adt other", ty);
+                record(adt_kind.into(), None, Vec::new())
             }
         }
     }
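
The Fields::WithDiscrim / Fields::NoDiscrim split introduced above decides whether the first entry in a struct's offset table is the enum discriminant or an ordinary field. A minimal standalone sketch of that selection (the types here are simplified stand-ins, not the compiler's):

    struct Struct { offsets: Vec<u64> }

    enum Fields<'a> {
        WithDiscrim(&'a Struct), // offsets[0] is the discriminant; skip it
        NoDiscrim(&'a Struct),   // every offset is a user-visible field
    }

    fn field_offsets<'a>(layout: Fields<'a>) -> &'a [u64] {
        match layout {
            Fields::WithDiscrim(s) => &s.offsets[1..],
            Fields::NoDiscrim(s) => &s.offsets[..],
        }
    }

    fn main() {
        let s = Struct { offsets: vec![0, 8, 12] };
        assert_eq!(field_offsets(Fields::WithDiscrim(&s)), &[8, 12]);
        assert_eq!(field_offsets(Fields::NoDiscrim(&s)), &[0, 8, 12]);
    }
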
@@ -1857,32 +1992,39 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> {
         assert!(!ty.has_infer_types());
 
         // First try computing a static layout.
-        let err = match (tcx, param_env).layout_of(ty) {
+        let err = match ty.layout(tcx, param_env) {
             Ok(layout) => {
-                return Ok(SizeSkeleton::Known(layout.size));
+                return Ok(SizeSkeleton::Known(layout.size(tcx)));
             }
             Err(err) => err
         };
 
+        let ptr_skeleton = |pointee: Ty<'tcx>| {
+            let non_zero = !ty.is_unsafe_ptr();
+            let tail = tcx.struct_tail(pointee);
+            match tail.sty {
+                ty::TyParam(_) | ty::TyProjection(_) => {
+                    assert!(tail.has_param_types() || tail.has_self_ty());
+                    Ok(SizeSkeleton::Pointer {
+                        non_zero,
+                        tail: tcx.erase_regions(&tail)
+                    })
+                }
+                _ => {
+                    bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
+                            tail `{}` is not a type parameter or a projection",
+                            ty, err, tail)
+                }
+            }
+        };
+
         match ty.sty {
             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
-                let non_zero = !ty.is_unsafe_ptr();
-                let tail = tcx.struct_tail(pointee);
-                match tail.sty {
-                    ty::TyParam(_) | ty::TyProjection(_) => {
-                        assert!(tail.has_param_types() || tail.has_self_ty());
-                        Ok(SizeSkeleton::Pointer {
-                            non_zero,
-                            tail: tcx.erase_regions(&tail)
-                        })
-                    }
-                    _ => {
-                        bug!("SizeSkeleton::compute({}): layout errored ({}), yet \
-                              tail `{}` is not a type parameter or a projection",
-                             ty, err, tail)
-                    }
-                }
+                ptr_skeleton(pointee)
+            }
+            ty::TyAdt(def, _) if def.is_box() => {
+                ptr_skeleton(ty.boxed_ty())
             }
 
             ty::TyAdt(def, substs) => {
@@ -1967,184 +2109,142 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> {
     }
 }
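
The `ptr_skeleton` closure factored out above is what lets `Box<T>` share a code path with `&T` and `*mut T`: even when the pointee's layout cannot be computed because its tail is a type parameter or projection, the pointer itself still has a known shape. A toy model of the idea, with made-up types standing in for the compiler's (and a plain `Option` in place of the error plumbing):

    #[derive(Debug, PartialEq)]
    enum SizeSkeleton {
        Known(u64),
        Pointer { non_zero: bool },
    }

    enum Ty {
        U32,
        Param, // layout unknown: a type parameter
        RawPtr(Box<Ty>),
        Ref(Box<Ty>),
        BoxOf(Box<Ty>),
    }

    fn compute(ty: &Ty) -> Option<SizeSkeleton> {
        match ty {
            Ty::U32 => Some(SizeSkeleton::Known(4)),
            Ty::Param => None, // no static layout available
            // &T and Box<T> are known non-null; *mut T is not.
            Ty::RawPtr(_) => Some(SizeSkeleton::Pointer { non_zero: false }),
            Ty::Ref(_) | Ty::BoxOf(_) => Some(SizeSkeleton::Pointer { non_zero: true }),
        }
    }

    fn main() {
        let boxed = Ty::BoxOf(Box::new(Ty::Param));
        let raw = Ty::RawPtr(Box::new(Ty::Param));
        assert_eq!(compute(&boxed), Some(SizeSkeleton::Pointer { non_zero: true }));
        assert_eq!(compute(&raw), Some(SizeSkeleton::Pointer { non_zero: false }));
    }
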
 
-/// The details of the layout of a type, alongside the type itself.
-/// Provides various type traversal APIs (e.g. recursing into fields).
-///
-/// Note that the details are NOT guaranteed to always be identical
-/// to those obtained from `layout_of(ty)`, as we need to produce
-/// layouts for which Rust types do not exist, such as enum variants
-/// or synthetic fields of enums (i.e. discriminants) and fat pointers.
+/// A pair of a type and its layout. Implements various
+/// type traversal APIs (e.g. recursing into fields).
 #[derive(Copy, Clone, Debug)]
 pub struct TyLayout<'tcx> {
     pub ty: Ty<'tcx>,
-    details: &'tcx LayoutDetails
+    pub layout: &'tcx Layout,
+    pub variant_index: Option<usize>,
 }
 
 impl<'tcx> Deref for TyLayout<'tcx> {
-    type Target = &'tcx LayoutDetails;
-    fn deref(&self) -> &&'tcx LayoutDetails {
-        &self.details
+    type Target = Layout;
+    fn deref(&self) -> &Layout {
+        self.layout
     }
 }
 
-pub trait HasTyCtxt<'tcx>: HasDataLayout {
+pub trait LayoutTyper<'tcx>: HasDataLayout {
+    type TyLayout;
+
     fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
+    fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx>;
 }
 
-impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
-    fn data_layout(&self) -> &TargetDataLayout {
-        &self.data_layout
-    }
+/// Combines a tcx with the parameter environment so that you can
+/// compute layout operations.
+#[derive(Copy, Clone)]
+pub struct LayoutCx<'a, 'tcx: 'a> {
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+    param_env: ty::ParamEnv<'tcx>,
 }
 
-impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
-    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
-        self.global_tcx()
+impl<'a, 'tcx> LayoutCx<'a, 'tcx> {
+    pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+        LayoutCx { tcx, param_env }
     }
 }
 
-impl<'a, 'gcx, 'tcx, T: Copy> HasDataLayout for (TyCtxt<'a, 'gcx, 'tcx>, T) {
+impl<'a, 'tcx> HasDataLayout for LayoutCx<'a, 'tcx> {
     fn data_layout(&self) -> &TargetDataLayout {
-        self.0.data_layout()
+        &self.tcx.data_layout
     }
 }
 
-impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) {
-    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
-        self.0.tcx()
-    }
-}
-
-pub trait MaybeResult<T> {
-    fn from_ok(x: T) -> Self;
-    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
-}
-
-impl<T> MaybeResult<T> for T {
-    fn from_ok(x: T) -> Self {
-        x
-    }
-    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
-        f(self)
-    }
-}
+impl<'a, 'tcx> LayoutTyper<'tcx> for LayoutCx<'a, 'tcx> {
+    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
 
-impl<T, E> MaybeResult<T> for Result<T, E> {
-    fn from_ok(x: T) -> Self {
-        Ok(x)
-    }
-    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
-        self.map(f)
+    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
+        self.tcx
     }
-}
-
-pub trait LayoutOf<T> {
-    type TyLayout;
-
-    fn layout_of(self, ty: T) -> Self::TyLayout;
-}
 
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) {
-    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
-
-    /// Computes the layout of a type. Note that this implicitly
-    /// executes in "reveal all" mode.
-    #[inline]
     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
-        let (tcx, param_env) = self;
+        let ty = self.normalize_projections(ty);
 
-        let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
-        let details = tcx.layout_raw(param_env.reveal_all().and(ty))?;
-        let layout = TyLayout {
+        Ok(TyLayout {
             ty,
-            details
-        };
-
-        // NB: This recording is normally disabled; when enabled, it
-        // can however trigger recursive invocations of `layout_of`.
-        // Therefore, we execute it *after* the main query has
-        // completed, to avoid problems around recursive structures
-        // and the like. (Admittedly, I wasn't able to reproduce a problem
-        // here, but it seems like the right thing to do. -nmatsakis)
-        LayoutDetails::record_layout_for_printing(tcx, ty, param_env, layout);
+            layout: ty.layout(self.tcx, self.param_env)?,
+            variant_index: None
+        })
+    }
 
-        Ok(layout)
+    fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.tcx.normalize_associated_type_in_env(&ty, self.param_env)
     }
 }
 
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>,
-                                       ty::ParamEnv<'tcx>) {
-    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
-
-    /// Computes the layout of a type. Note that this implicitly
-    /// executes in "reveal all" mode.
-    #[inline]
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
-        let (tcx_at, param_env) = self;
-
-        let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
-        let details = tcx_at.layout_raw(param_env.reveal_all().and(ty))?;
-        let layout = TyLayout {
-            ty,
-            details
-        };
-
-        // NB: This recording is normally disabled; when enabled, it
-        // can however trigger recursive invocations of `layout_of`.
-        // Therefore, we execute it *after* the main query has
-        // completed, to avoid problems around recursive structures
-        // and the like. (Admittedly, I wasn't able to reproduce a problem
-        // here, but it seems like the right thing to do. -nmatsakis)
-        LayoutDetails::record_layout_for_printing(tcx_at.tcx, ty, param_env, layout);
+impl<'a, 'tcx> TyLayout<'tcx> {
+    pub fn for_variant(&self, variant_index: usize) -> Self {
+        TyLayout {
+            variant_index: Some(variant_index),
+            ..*self
+        }
+    }
 
-        Ok(layout)
+    pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
+        self.layout.field_offset(cx, i, self.variant_index)
     }
-}
 
-impl<'a, 'tcx> TyLayout<'tcx> {
-    pub fn for_variant<C>(&self, cx: C, variant_index: usize) -> Self
-        where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
-              C::TyLayout: MaybeResult<TyLayout<'tcx>>
-    {
-        let details = match self.variants {
-            Variants::Single { index } if index == variant_index => self.details,
-
-            Variants::Single { index } => {
-                // Deny calling for_variant more than once for non-Single enums.
-                cx.layout_of(self.ty).map_same(|layout| {
-                    assert_eq!(layout.variants, Variants::Single { index });
-                    layout
-                });
-
-                let fields = match self.ty.sty {
-                    ty::TyAdt(def, _) => def.variants[variant_index].fields.len(),
-                    _ => bug!()
-                };
-                let mut details = LayoutDetails::uninhabited(fields);
-                details.variants = Variants::Single { index: variant_index };
-                cx.tcx().intern_layout(details)
+    pub fn field_count(&self) -> usize {
+        // Handle enum/union through the type rather than Layout.
+        if let ty::TyAdt(def, _) = self.ty.sty {
+            let v = self.variant_index.unwrap_or(0);
+            if def.variants.is_empty() {
+                assert_eq!(v, 0);
+                return 0;
+            } else {
+                return def.variants[v].fields.len();
             }
+        }
 
-            Variants::NicheFilling { ref variants, .. } |
-            Variants::Tagged { ref variants, .. } => {
-                &variants[variant_index]
+        match *self.layout {
+            Scalar { .. } => {
+                bug!("TyLayout::field_count({:?}): not applicable", self)
             }
-        };
 
-        assert_eq!(details.variants, Variants::Single { index: variant_index });
+            // Handled above (the TyAdt case).
+            CEnum { .. } |
+            General { .. } |
+            UntaggedUnion { .. } |
+            RawNullablePointer { .. } |
+            StructWrappedNullablePointer { .. } => bug!(),
 
-        TyLayout {
-            ty: self.ty,
-            details
+            FatPointer { .. } => 2,
+
+            Vector { count, .. } |
+            Array { count, .. } => {
+                let usize_count = count as usize;
+                assert_eq!(usize_count as u64, count);
+                usize_count
+            }
+
+            Univariant { ref variant, .. } => variant.offsets.len(),
         }
     }
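
The Vector/Array arm above uses the checked `u64`-to-`usize` narrowing idiom that recurs throughout this file: cast, then assert that the round-trip is lossless. In isolation:

    fn checked_usize(count: u64) -> usize {
        let usize_count = count as usize;
        // On targets where usize is narrower than u64, a large count would
        // silently truncate; the round-trip assert catches that.
        assert_eq!(usize_count as u64, count);
        usize_count
    }

    fn main() {
        assert_eq!(checked_usize(16), 16);
    }
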
 
-    pub fn field<C>(&self, cx: C, i: usize) -> C::TyLayout
-        where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
-              C::TyLayout: MaybeResult<TyLayout<'tcx>>
-    {
+    pub fn field_type<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
         let tcx = cx.tcx();
-        cx.layout_of(match self.ty.sty {
+
+        let ptr_field_type = |pointee: Ty<'tcx>| {
+            assert!(i < 2);
+            let slice = |element: Ty<'tcx>| {
+                if i == 0 {
+                    tcx.mk_mut_ptr(element)
+                } else {
+                    tcx.types.usize
+                }
+            };
+            match tcx.struct_tail(pointee).sty {
+                ty::TySlice(element) => slice(element),
+                ty::TyStr => slice(tcx.types.u8),
+                ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
+                _ => bug!("TyLayout::field_type({:?}): not applicable", self)
+            }
+        };
+
+        match self.ty.sty {
             ty::TyBool |
             ty::TyChar |
             ty::TyInt(_) |
@@ -2161,35 +2261,10 @@ impl<'a, 'tcx> TyLayout<'tcx> {
             // Potentially-fat pointers.
             ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
             ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
-                assert!(i < 2);
-
-                // Reuse the fat *T type as its own thin pointer data field.
-                // This provides information about e.g. DST struct pointees
-                // (which may have no non-DST form), and will work as long
-                // as the `Abi` or `FieldPlacement` is checked by users.
-                if i == 0 {
-                    let nil = tcx.mk_nil();
-                    let ptr_ty = if self.ty.is_unsafe_ptr() {
-                        tcx.mk_mut_ptr(nil)
-                    } else {
-                        tcx.mk_mut_ref(tcx.types.re_static, nil)
-                    };
-                    return cx.layout_of(ptr_ty).map_same(|mut ptr_layout| {
-                        ptr_layout.ty = self.ty;
-                        ptr_layout
-                    });
-                }
-
-                match tcx.struct_tail(pointee).sty {
-                    ty::TySlice(_) |
-                    ty::TyStr => tcx.types.usize,
-                    ty::TyDynamic(..) => {
-                        // FIXME(eddyb) use a usize/fn() array with
-                        // the correct number of vtables slots.
-                        tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
-                    }
-                    _ => bug!("TyLayout::field_type({:?}): not applicable", self)
-                }
+                ptr_field_type(pointee)
+            }
+            ty::TyAdt(def, _) if def.is_box() => {
+                ptr_field_type(self.ty.boxed_ty())
             }
 
             // Arrays and slices.
@@ -2215,232 +2290,94 @@ impl<'a, 'tcx> TyLayout<'tcx> {
 
             // ADTs.
             ty::TyAdt(def, substs) => {
-                match self.variants {
-                    Variants::Single { index } => {
-                        def.variants[index].fields[i].ty(tcx, substs)
-                    }
-
-                    // Discriminant field for enums (where applicable).
-                    Variants::Tagged { ref discr, .. } |
-                    Variants::NicheFilling { niche: ref discr, .. } => {
-                        assert_eq!(i, 0);
-                        let layout = LayoutDetails::scalar(tcx, discr.clone());
-                        return MaybeResult::from_ok(TyLayout {
-                            details: tcx.intern_layout(layout),
-                            ty: discr.value.to_ty(tcx)
-                        });
-                    }
-                }
+                def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
             }
 
             ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
             ty::TyInfer(_) | ty::TyError => {
                 bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
             }
-        })
-    }
-
-    /// Returns true if the layout corresponds to an unsized type.
-    pub fn is_unsized(&self) -> bool {
-        self.abi.is_unsized()
-    }
-
-    /// Returns true if the fields of the layout are packed.
-    pub fn is_packed(&self) -> bool {
-        self.abi.is_packed()
-    }
-
-    /// Returns true if the type is a ZST and not unsized.
-    pub fn is_zst(&self) -> bool {
-        match self.abi {
-            Abi::Uninhabited => true,
-            Abi::Scalar(_) | Abi::ScalarPair(..) => false,
-            Abi::Vector => self.size.bytes() == 0,
-            Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0
         }
     }
 
-    pub fn size_and_align(&self) -> (Size, Align) {
-        (self.size, self.align)
-    }
-
-    /// Find the offset of a niche leaf field, starting from
-    /// the given type and recursing through aggregates, which
-    /// has at least `count` consecutive invalid values.
-    /// The tuple is `(offset, scalar, niche_value)`.
-    // FIXME(eddyb) traverse already optimized enums.
-    fn find_niche<C>(&self, cx: C, count: u128)
-        -> Result<Option<(Size, Scalar, u128)>, LayoutError<'tcx>>
-        where C: LayoutOf<Ty<'tcx>, TyLayout = Result<Self, LayoutError<'tcx>>> +
-                 HasTyCtxt<'tcx>
-    {
-        let scalar_component = |scalar: &Scalar, offset| {
-            let Scalar { value, valid_range: ref v } = *scalar;
-
-            let bits = value.size(cx).bits();
-            assert!(bits <= 128);
-            let max_value = !0u128 >> (128 - bits);
-
-            // Find out how many values are outside the valid range.
-            let niches = if v.start <= v.end {
-                v.start + (max_value - v.end)
-            } else {
-                v.start - v.end - 1
-            };
-
-            // Give up if we can't fit `count` consecutive niches.
-            if count > niches {
-                return None;
-            }
-
-            let niche_start = v.end.wrapping_add(1) & max_value;
-            let niche_end = v.end.wrapping_add(count) & max_value;
-            Some((offset, Scalar {
-                value,
-                valid_range: v.start..=niche_end
-            }, niche_start))
-        };
-
-        match self.abi {
-            Abi::Scalar(ref scalar) => {
-                return Ok(scalar_component(scalar, Size::from_bytes(0)));
-            }
-            Abi::ScalarPair(ref a, ref b) => {
-                return Ok(scalar_component(a, Size::from_bytes(0)).or_else(|| {
-                    scalar_component(b, a.value.size(cx).abi_align(b.value.align(cx)))
-                }));
-            }
-            _ => {}
-        }
-
-        // Perhaps one of the fields is non-zero, let's recurse and find out.
-        if let FieldPlacement::Union(_) = self.fields {
-            // Only Rust enums have safe-to-inspect fields
-            // (a discriminant), other unions are unsafe.
-            if let Variants::Single { .. } = self.variants {
-                return Ok(None);
-            }
-        }
-        if let FieldPlacement::Array { .. } = self.fields {
-            if self.fields.count() > 0 {
-                return self.field(cx, 0)?.find_niche(cx, count);
-            }
-        }
-        for i in 0..self.fields.count() {
-            let r = self.field(cx, i)?.find_niche(cx, count)?;
-            if let Some((offset, scalar, niche_value)) = r {
-                let offset = self.fields.offset(i) + offset;
-                return Ok(Some((offset, scalar, niche_value)));
-            }
-        }
-        Ok(None)
+    pub fn field<C: LayoutTyper<'tcx>>(&self,
+                                       cx: C,
+                                       i: usize)
+                                       -> C::TyLayout {
+        cx.layout_of(cx.normalize_projections(self.field_type(cx, i)))
     }
 }
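
Note that `for_variant` above no longer recomputes anything: since `TyLayout` is `Copy`, it just records the variant index with functional-update syntax. The shape of that, reduced to the one field that changes:

    #[derive(Copy, Clone, Debug)]
    struct TyLayout {
        variant_index: Option<usize>,
        // `ty` and `layout` elided; both are Copy in the real type.
    }

    impl TyLayout {
        fn for_variant(&self, variant_index: usize) -> Self {
            // `..*self` copies every other field unchanged.
            TyLayout { variant_index: Some(variant_index), ..*self }
        }
    }

    fn main() {
        let base = TyLayout { variant_index: None };
        let v1 = base.for_variant(1);
        assert_eq!(v1.variant_index, Some(1));
    }
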
 
-impl<'gcx> HashStable<StableHashingContext<'gcx>> for Variants {
+impl<'gcx> HashStable<StableHashingContext<'gcx>> for Layout
+{
     fn hash_stable<W: StableHasherResult>(&self,
                                           hcx: &mut StableHashingContext<'gcx>,
                                           hasher: &mut StableHasher<W>) {
-        use ty::layout::Variants::*;
+        use ty::layout::Layout::*;
         mem::discriminant(self).hash_stable(hcx, hasher);
 
         match *self {
-            Single { index } => {
-                index.hash_stable(hcx, hasher);
-            }
-            Tagged {
-                ref discr,
-                ref variants,
-            } => {
-                discr.hash_stable(hcx, hasher);
-                variants.hash_stable(hcx, hasher);
-            }
-            NicheFilling {
-                dataful_variant,
-                niche_variants: RangeInclusive { start, end },
-                ref niche,
-                niche_start,
-                ref variants,
-            } => {
-                dataful_variant.hash_stable(hcx, hasher);
-                start.hash_stable(hcx, hasher);
-                end.hash_stable(hcx, hasher);
-                niche.hash_stable(hcx, hasher);
-                niche_start.hash_stable(hcx, hasher);
-                variants.hash_stable(hcx, hasher);
+            Scalar { value, non_zero } => {
+                value.hash_stable(hcx, hasher);
+                non_zero.hash_stable(hcx, hasher);
             }
-        }
-    }
-}
-
-impl<'gcx> HashStable<StableHashingContext<'gcx>> for FieldPlacement {
-    fn hash_stable<W: StableHasherResult>(&self,
-                                          hcx: &mut StableHashingContext<'gcx>,
-                                          hasher: &mut StableHasher<W>) {
-        use ty::layout::FieldPlacement::*;
-        mem::discriminant(self).hash_stable(hcx, hasher);
-
-        match *self {
-            Union(count) => {
+            Vector { element, count } => {
+                element.hash_stable(hcx, hasher);
                 count.hash_stable(hcx, hasher);
             }
-            Array { count, stride } => {
+            Array { sized, align, primitive_align, element_size, count } => {
+                sized.hash_stable(hcx, hasher);
+                align.hash_stable(hcx, hasher);
+                primitive_align.hash_stable(hcx, hasher);
+                element_size.hash_stable(hcx, hasher);
                 count.hash_stable(hcx, hasher);
-                stride.hash_stable(hcx, hasher);
             }
-            Arbitrary { ref offsets, ref memory_index } => {
-                offsets.hash_stable(hcx, hasher);
-                memory_index.hash_stable(hcx, hasher);
+            FatPointer { ref metadata, non_zero } => {
+                metadata.hash_stable(hcx, hasher);
+                non_zero.hash_stable(hcx, hasher);
             }
-        }
-    }
-}
-
-impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
-    fn hash_stable<W: StableHasherResult>(&self,
-                                          hcx: &mut StableHashingContext<'gcx>,
-                                          hasher: &mut StableHasher<W>) {
-        use ty::layout::Abi::*;
-        mem::discriminant(self).hash_stable(hcx, hasher);
-
-        match *self {
-            Uninhabited => {}
-            Scalar(ref value) => {
-                value.hash_stable(hcx, hasher);
+            CEnum { discr, signed, non_zero, min, max } => {
+                discr.hash_stable(hcx, hasher);
+                signed.hash_stable(hcx, hasher);
+                non_zero.hash_stable(hcx, hasher);
+                min.hash_stable(hcx, hasher);
+                max.hash_stable(hcx, hasher);
             }
-            ScalarPair(ref a, ref b) => {
-                a.hash_stable(hcx, hasher);
-                b.hash_stable(hcx, hasher);
+            Univariant { ref variant, non_zero } => {
+                variant.hash_stable(hcx, hasher);
+                non_zero.hash_stable(hcx, hasher);
             }
-            Vector => {}
-            Aggregate { packed, sized } => {
-                packed.hash_stable(hcx, hasher);
-                sized.hash_stable(hcx, hasher);
+            UntaggedUnion { ref variants } => {
+                variants.hash_stable(hcx, hasher);
+            }
+            General { discr, ref variants, size, align, primitive_align } => {
+                discr.hash_stable(hcx, hasher);
+                variants.hash_stable(hcx, hasher);
+                size.hash_stable(hcx, hasher);
+                align.hash_stable(hcx, hasher);
+                primitive_align.hash_stable(hcx, hasher);
+            }
+            RawNullablePointer { nndiscr, ref value } => {
+                nndiscr.hash_stable(hcx, hasher);
+                value.hash_stable(hcx, hasher);
+            }
+            StructWrappedNullablePointer {
+                nndiscr,
+                ref nonnull,
+                ref discrfield,
+                ref discrfield_source
+            } => {
+                nndiscr.hash_stable(hcx, hasher);
+                nonnull.hash_stable(hcx, hasher);
+                discrfield.hash_stable(hcx, hasher);
+                discrfield_source.hash_stable(hcx, hasher);
             }
         }
     }
 }
 
-impl<'gcx> HashStable<StableHashingContext<'gcx>> for Scalar {
-    fn hash_stable<W: StableHasherResult>(&self,
-                                          hcx: &mut StableHashingContext<'gcx>,
-                                          hasher: &mut StableHasher<W>) {
-        let Scalar { value, valid_range: RangeInclusive { start, end } } = *self;
-        value.hash_stable(hcx, hasher);
-        start.hash_stable(hcx, hasher);
-        end.hash_stable(hcx, hasher);
-    }
-}
-
-impl_stable_hash_for!(struct ::ty::layout::LayoutDetails {
-    variants,
-    fields,
-    abi,
-    size,
-    align
-});
-
 impl_stable_hash_for!(enum ::ty::layout::Integer {
+    I1,
     I8,
     I16,
     I32,
@@ -2449,7 +2386,7 @@ impl_stable_hash_for!(enum ::ty::layout::Integer {
 });
 
 impl_stable_hash_for!(enum ::ty::layout::Primitive {
-    Int(integer, signed),
+    Int(integer),
     F32,
     F64,
     Pointer
@@ -2478,3 +2415,20 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for LayoutError<'gcx>
         }
     }
 }
+
+impl_stable_hash_for!(struct ::ty::layout::Struct {
+    align,
+    primitive_align,
+    packed,
+    sized,
+    offsets,
+    memory_index,
+    min_size
+});
+
+impl_stable_hash_for!(struct ::ty::layout::Union {
+    align,
+    primitive_align,
+    min_size,
+    packed
+});
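
`LayoutCx` bundles a `TyCtxt` with a `ParamEnv` so that layout queries need a single context value, and `LayoutTyper` abstracts over any such context. A stripped-down sketch of the pattern with toy types in place of the compiler's (the real `layout_of` also normalizes projections and returns a `TyLayout`):

    #[derive(Copy, Clone)]
    struct Tcx { pointer_bytes: u64 }

    #[derive(Copy, Clone)]
    struct ParamEnv;

    #[derive(Copy, Clone)]
    enum Ty { U8, Pointer }

    trait LayoutTyper {
        fn layout_of(self, ty: Ty) -> Result<u64, String>;
    }

    #[derive(Copy, Clone)]
    struct LayoutCx { tcx: Tcx, param_env: ParamEnv }

    impl LayoutCx {
        fn new(tcx: Tcx, param_env: ParamEnv) -> Self {
            LayoutCx { tcx, param_env }
        }
    }

    impl LayoutTyper for LayoutCx {
        fn layout_of(self, ty: Ty) -> Result<u64, String> {
            // The real version consults the layout query; here we just
            // return a size in bytes.
            Ok(match ty {
                Ty::U8 => 1,
                Ty::Pointer => self.tcx.pointer_bytes,
            })
        }
    }

    fn main() {
        let cx = LayoutCx::new(Tcx { pointer_bytes: 8 }, ParamEnv);
        assert_eq!(cx.layout_of(Ty::Pointer), Ok(8));
    }
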
diff --git a/src/librustc/ty/maps/mod.rs b/src/librustc/ty/maps/mod.rs
index 2f648e8d3ff..320f6514849 100644
--- a/src/librustc/ty/maps/mod.rs
+++ b/src/librustc/ty/maps/mod.rs
@@ -34,6 +34,7 @@ use session::config::OutputFilenames;
 use traits::Vtable;
 use traits::specialization_graph;
 use ty::{self, CrateInherentImpls, Ty, TyCtxt};
+use ty::layout::{Layout, LayoutError};
 use ty::steal::Steal;
 use ty::subst::Substs;
 use util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet};
@@ -264,8 +265,7 @@ define_maps! { <'tcx>
     [] fn is_freeze_raw: is_freeze_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
     [] fn needs_drop_raw: needs_drop_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool,
     [] fn layout_raw: layout_dep_node(ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
-                                  -> Result<&'tcx ty::layout::LayoutDetails,
-                                            ty::layout::LayoutError<'tcx>>,
+                                  -> Result<&'tcx Layout, LayoutError<'tcx>>,
 
     [] fn dylib_dependency_formats: DylibDepFormats(CrateNum)
                                     -> Rc<Vec<(CrateNum, LinkagePreference)>>,
diff --git a/src/librustc/ty/mod.rs b/src/librustc/ty/mod.rs
index 48ec92a255b..a584f2ce191 100644
--- a/src/librustc/ty/mod.rs
+++ b/src/librustc/ty/mod.rs
@@ -1674,6 +1674,11 @@ impl<'a, 'gcx, 'tcx> AdtDef {
         self.variants.iter().flat_map(|v| v.fields.iter())
     }
 
+    #[inline]
+    pub fn is_univariant(&self) -> bool {
+        self.variants.len() == 1
+    }
+
     pub fn is_payloadfree(&self) -> bool {
         !self.variants.is_empty() &&
             self.variants.iter().all(|v| v.fields.is_empty())
@@ -2617,10 +2622,9 @@ fn original_crate_name<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
 }
 
 pub fn provide(providers: &mut ty::maps::Providers) {
+    util::provide(providers);
     context::provide(providers);
     erase_regions::provide(providers);
-    layout::provide(providers);
-    util::provide(providers);
     *providers = ty::maps::Providers {
         associated_item,
         associated_item_def_ids,
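
The `is_univariant` helper added above counts structs, unions, and single-variant enums alike, which is why other call sites in this patch spell out `variants.len() > 1` when they specifically need multi-variant enums. A toy version of the predicate:

    struct Variant;

    struct AdtDef { variants: Vec<Variant> }

    impl AdtDef {
        fn is_univariant(&self) -> bool {
            self.variants.len() == 1
        }
    }

    fn main() {
        let strukt = AdtDef { variants: vec![Variant] };           // struct: one variant
        let two_variant_enum = AdtDef { variants: vec![Variant, Variant] };
        assert!(strukt.is_univariant());
        assert!(!two_variant_enum.is_univariant());
    }
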
diff --git a/src/librustc/ty/util.rs b/src/librustc/ty/util.rs
index 23dd3f1bc2b..a0219f2f95b 100644
--- a/src/librustc/ty/util.rs
+++ b/src/librustc/ty/util.rs
@@ -19,6 +19,7 @@ use middle::const_val::ConstVal;
 use traits::{self, Reveal};
 use ty::{self, Ty, TyCtxt, TypeFoldable};
 use ty::fold::TypeVisitor;
+use ty::layout::{Layout, LayoutError};
 use ty::subst::{Subst, Kind};
 use ty::TypeVariants::*;
 use util::common::ErrorReported;
@@ -851,6 +852,30 @@ impl<'a, 'tcx> ty::TyS<'tcx> {
         tcx.needs_drop_raw(param_env.and(self))
     }
 
+    /// Computes the layout of a type. Note that this implicitly
+    /// executes in "reveal all" mode.
+    #[inline]
+    pub fn layout<'lcx>(&'tcx self,
+                        tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                        param_env: ty::ParamEnv<'tcx>)
+                        -> Result<&'tcx Layout, LayoutError<'tcx>> {
+        let ty = tcx.erase_regions(&self);
+        let layout = tcx.layout_raw(param_env.reveal_all().and(ty));
+
+        // NB: This recording is normally disabled; when enabled, it
+        // can however trigger recursive invocations of `layout()`.
+        // Therefore, we execute it *after* the main query has
+        // completed, to avoid problems around recursive structures
+        // and the like. (Admittedly, I wasn't able to reproduce a problem
+        // here, but it seems like the right thing to do. -nmatsakis)
+        if let Ok(l) = layout {
+            Layout::record_layout_for_printing(tcx, ty, param_env, l);
+        }
+
+        layout
+    }
+
+
     /// Check whether a type is representable. This means it cannot contain unboxed
     /// structural recursion. This check is needed for structs and enums.
     pub fn is_representable(&'tcx self,
@@ -1159,6 +1184,26 @@ fn needs_drop_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     }
 }
 
+fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                        query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>)
+                        -> Result<&'tcx Layout, LayoutError<'tcx>>
+{
+    let (param_env, ty) = query.into_parts();
+
+    let rec_limit = tcx.sess.recursion_limit.get();
+    let depth = tcx.layout_depth.get();
+    if depth > rec_limit {
+        tcx.sess.fatal(
+            &format!("overflow representing the type `{}`", ty));
+    }
+
+    tcx.layout_depth.set(depth+1);
+    let layout = Layout::compute_uncached(tcx, param_env, ty);
+    tcx.layout_depth.set(depth);
+
+    layout
+}
+
 pub enum ExplicitSelf<'tcx> {
     ByValue,
     ByReference(ty::Region<'tcx>, hir::Mutability),
@@ -1217,6 +1262,7 @@ pub fn provide(providers: &mut ty::maps::Providers) {
         is_sized_raw,
         is_freeze_raw,
         needs_drop_raw,
+        layout_raw,
         ..*providers
     };
 }
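
`layout_raw` above guards the recursive layout computation with a depth counter kept in the tcx, incrementing it around the recursive call and bailing out past the recursion limit. A self-contained sketch of the same guard using a thread-local `Cell` (returning an error where the real code calls `sess.fatal`):

    use std::cell::Cell;

    const REC_LIMIT: usize = 64;

    thread_local! {
        static DEPTH: Cell<usize> = Cell::new(0);
    }

    enum Ty { Leaf, Nest(Box<Ty>) }

    fn layout_raw(ty: &Ty) -> Result<u64, String> {
        let depth = DEPTH.with(|d| d.get());
        if depth > REC_LIMIT {
            return Err("overflow representing the type".to_string());
        }
        DEPTH.with(|d| d.set(depth + 1));
        let result = match ty {
            Ty::Leaf => Ok(1),
            Ty::Nest(inner) => layout_raw(inner),
        };
        // Restore the depth whether or not the computation succeeded.
        DEPTH.with(|d| d.set(depth));
        result
    }

    fn main() {
        assert_eq!(layout_raw(&Ty::Nest(Box::new(Ty::Leaf))), Ok(1));
    }
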
diff --git a/src/librustc_const_eval/_match.rs b/src/librustc_const_eval/_match.rs
index 33d9bfa6e6b..6ebe3c67966 100644
--- a/src/librustc_const_eval/_match.rs
+++ b/src/librustc_const_eval/_match.rs
@@ -255,7 +255,7 @@ impl<'tcx> Constructor<'tcx> {
         match self {
             &Variant(vid) => adt.variant_index_with_id(vid),
             &Single => {
-                assert!(!adt.is_enum());
+                assert_eq!(adt.variants.len(), 1);
                 0
             }
             _ => bug!("bad constructor {:?} for adt {:?}", self, adt)
@@ -356,7 +356,7 @@ impl<'tcx> Witness<'tcx> {
                     }).collect();
 
                     if let ty::TyAdt(adt, substs) = ty.sty {
-                        if adt.is_enum() {
+                        if adt.variants.len() > 1 {
                             PatternKind::Variant {
                                 adt_def: adt,
                                 substs,
@@ -444,7 +444,7 @@ fn all_constructors<'a, 'tcx: 'a>(cx: &mut MatchCheckCtxt<'a, 'tcx>,
                 (0..pcx.max_slice_length+1).map(|length| Slice(length)).collect()
             }
         }
-        ty::TyAdt(def, substs) if def.is_enum() => {
+        ty::TyAdt(def, substs) if def.is_enum() && def.variants.len() != 1 => {
             def.variants.iter()
                 .filter(|v| !cx.is_variant_uninhabited(v, substs))
                 .map(|v| Variant(v.did))
diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs
index a548c1df16e..657156902b5 100644
--- a/src/librustc_const_eval/eval.rs
+++ b/src/librustc_const_eval/eval.rs
@@ -17,7 +17,6 @@ use rustc::hir::map::blocks::FnLikeNode;
 use rustc::hir::def::{Def, CtorKind};
 use rustc::hir::def_id::DefId;
 use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::LayoutOf;
 use rustc::ty::maps::Providers;
 use rustc::ty::util::IntTypeExt;
 use rustc::ty::subst::{Substs, Subst};
@@ -314,18 +313,18 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>,
           if tcx.fn_sig(def_id).abi() == Abi::RustIntrinsic {
             let layout_of = |ty: Ty<'tcx>| {
                 let ty = tcx.erase_regions(&ty);
-                (tcx.at(e.span), cx.param_env).layout_of(ty).map_err(|err| {
+                tcx.at(e.span).layout_raw(cx.param_env.reveal_all().and(ty)).map_err(|err| {
                     ConstEvalErr { span: e.span, kind: LayoutError(err) }
                 })
             };
             match &tcx.item_name(def_id)[..] {
                 "size_of" => {
-                    let size = layout_of(substs.type_at(0))?.size.bytes();
+                    let size = layout_of(substs.type_at(0))?.size(tcx).bytes();
                     return Ok(mk_const(Integral(Usize(ConstUsize::new(size,
                         tcx.sess.target.usize_ty).unwrap()))));
                 }
                 "min_align_of" => {
-                    let align = layout_of(substs.type_at(0))?.align.abi();
+                    let align = layout_of(substs.type_at(0))?.align(tcx).abi();
                     return Ok(mk_const(Integral(Usize(ConstUsize::new(align,
                         tcx.sess.target.usize_ty).unwrap()))));
                 }
diff --git a/src/librustc_const_eval/pattern.rs b/src/librustc_const_eval/pattern.rs
index cfbb9623f7d..d7a16e9d2fc 100644
--- a/src/librustc_const_eval/pattern.rs
+++ b/src/librustc_const_eval/pattern.rs
@@ -150,7 +150,7 @@ impl<'tcx> fmt::Display for Pattern<'tcx> {
                         Some(&adt_def.variants[variant_index])
                     }
                     _ => if let ty::TyAdt(adt, _) = self.ty.sty {
-                        if !adt.is_enum() {
+                        if adt.is_univariant() {
                             Some(&adt.variants[0])
                         } else {
                             None
@@ -598,7 +598,7 @@ impl<'a, 'tcx> PatternContext<'a, 'tcx> {
             Def::Variant(variant_id) | Def::VariantCtor(variant_id, ..) => {
                 let enum_id = self.tcx.parent_def_id(variant_id).unwrap();
                 let adt_def = self.tcx.adt_def(enum_id);
-                if adt_def.is_enum() {
+                if adt_def.variants.len() > 1 {
                     let substs = match ty.sty {
                         ty::TyAdt(_, substs) |
                         ty::TyFnDef(_, substs) => substs,
diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs
index 1356574f646..8f08987505b 100644
--- a/src/librustc_lint/types.rs
+++ b/src/librustc_lint/types.rs
@@ -13,7 +13,7 @@
 use rustc::hir::def_id::DefId;
 use rustc::ty::subst::Substs;
 use rustc::ty::{self, AdtKind, Ty, TyCtxt};
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::layout::{Layout, Primitive};
 use middle::const_val::ConstVal;
 use rustc_const_eval::ConstContext;
 use util::nodemap::FxHashSet;
@@ -748,23 +748,25 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
                 // sizes only make sense for non-generic types
                 let item_def_id = cx.tcx.hir.local_def_id(it.id);
                 let t = cx.tcx.type_of(item_def_id);
+                let param_env = cx.param_env.reveal_all();
                 let ty = cx.tcx.erase_regions(&t);
-                let layout = cx.layout_of(ty).unwrap_or_else(|e| {
+                let layout = ty.layout(cx.tcx, param_env).unwrap_or_else(|e| {
                     bug!("failed to get layout for `{}`: {}", t, e)
                 });
 
-                if let layout::Variants::Tagged { ref variants, ref discr, .. } = layout.variants {
-                    let discr_size = discr.value.size(cx.tcx).bytes();
+                if let Layout::General { ref variants, ref size, discr, .. } = *layout {
+                    let discr_size = Primitive::Int(discr).size(cx.tcx).bytes();
 
                     debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
-                      t, layout.size.bytes(), layout);
+                      t, size.bytes(), layout);
 
                     let (largest, slargest, largest_index) = enum_definition.variants
                         .iter()
                         .zip(variants)
                         .map(|(variant, variant_layout)| {
                             // Subtract the size of the enum discriminant
-                            let bytes = variant_layout.size.bytes()
+                            let bytes = variant_layout.min_size
+                                .bytes()
                                 .saturating_sub(discr_size);
 
                             debug!("- variant `{}` is {} bytes large", variant.node.name, bytes);
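
The lint above reduces the per-variant sizes to the largest and second-largest (plus the index of the largest), after subtracting the discriminant size from each. The same scan over plain numbers:

    fn largest_two(sizes: &[u64], discr_size: u64) -> (u64, u64, usize) {
        sizes.iter()
             .map(|s| s.saturating_sub(discr_size))
             .enumerate()
             .fold((0, 0, 0), |(l, s, li), (i, bytes)| {
                 if bytes > l {
                     (bytes, l, i)  // new largest; old largest becomes second
                 } else if bytes > s {
                     (l, bytes, li) // new second-largest
                 } else {
                     (l, s, li)
                 }
             })
    }

    fn main() {
        // Variant sizes 9, 25 and 17 bytes, with a 1-byte discriminant.
        assert_eq!(largest_two(&[9, 25, 17], 1), (24, 16, 1));
    }
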
diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs
index aab6139349d..cff584c1680 100644
--- a/src/librustc_llvm/ffi.rs
+++ b/src/librustc_llvm/ffi.rs
@@ -575,6 +575,8 @@ extern "C" {
                                    ElementCount: c_uint,
                                    Packed: Bool)
                                    -> TypeRef;
+    pub fn LLVMCountStructElementTypes(StructTy: TypeRef) -> c_uint;
+    pub fn LLVMGetStructElementTypes(StructTy: TypeRef, Dest: *mut TypeRef);
     pub fn LLVMIsPackedStruct(StructTy: TypeRef) -> Bool;
 
     // Operations on array, pointer, and vector types (sequence types)
@@ -583,6 +585,7 @@ extern "C" {
     pub fn LLVMVectorType(ElementType: TypeRef, ElementCount: c_uint) -> TypeRef;
 
     pub fn LLVMGetElementType(Ty: TypeRef) -> TypeRef;
+    pub fn LLVMGetArrayLength(ArrayTy: TypeRef) -> c_uint;
     pub fn LLVMGetVectorSize(VectorTy: TypeRef) -> c_uint;
 
     // Operations on other types
@@ -608,7 +611,10 @@ extern "C" {
     pub fn LLVMConstNull(Ty: TypeRef) -> ValueRef;
     pub fn LLVMConstICmp(Pred: IntPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef;
     pub fn LLVMConstFCmp(Pred: RealPredicate, V1: ValueRef, V2: ValueRef) -> ValueRef;
+    // only for isize/vector
     pub fn LLVMGetUndef(Ty: TypeRef) -> ValueRef;
+    pub fn LLVMIsNull(Val: ValueRef) -> Bool;
+    pub fn LLVMIsUndef(Val: ValueRef) -> Bool;
 
     // Operations on metadata
     pub fn LLVMMDStringInContext(C: ContextRef, Str: *const c_char, SLen: c_uint) -> ValueRef;
@@ -730,9 +736,7 @@ extern "C" {
                                        FunctionTy: TypeRef)
                                        -> ValueRef;
     pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint);
-    pub fn LLVMRustAddAlignmentAttr(Fn: ValueRef, index: c_uint, bytes: u32);
     pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64);
-    pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: ValueRef, index: c_uint, bytes: u64);
     pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute);
     pub fn LLVMRustAddFunctionAttrStringValue(Fn: ValueRef,
                                               index: c_uint,
@@ -762,11 +766,7 @@ extern "C" {
     // Operations on call sites
     pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint);
     pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute);
-    pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u32);
     pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64);
-    pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: ValueRef,
-                                                        index: c_uint,
-                                                        bytes: u64);
 
     // Operations on load/store instructions (only)
     pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool);
@@ -1205,13 +1205,15 @@ extern "C" {
     pub fn LLVMRustBuildAtomicLoad(B: BuilderRef,
                                    PointerVal: ValueRef,
                                    Name: *const c_char,
-                                   Order: AtomicOrdering)
+                                   Order: AtomicOrdering,
+                                   Alignment: c_uint)
                                    -> ValueRef;
 
     pub fn LLVMRustBuildAtomicStore(B: BuilderRef,
                                     Val: ValueRef,
                                     Ptr: ValueRef,
-                                    Order: AtomicOrdering)
+                                    Order: AtomicOrdering,
+                                    Alignment: c_uint)
                                     -> ValueRef;
 
     pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef,
@@ -1245,6 +1247,23 @@ extern "C" {
 
     /// Creates target data from a target layout string.
     pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef;
+    /// Computes the size of a type in bits for a target.
+    pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
+
+    /// Distance between successive elements in an array of T. Includes ABI padding.
+    pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
+
+    /// Returns the preferred alignment of a type.
+    pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint;
+    /// Returns the minimum alignment of a type.
+    pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint;
+
+    /// Computes the byte offset of the indexed struct element for a
+    /// target.
+    pub fn LLVMOffsetOfElement(TD: TargetDataRef,
+                               StructTy: TypeRef,
+                               Element: c_uint)
+                               -> c_ulonglong;
 
     /// Disposes target data.
     pub fn LLVMDisposeTargetData(TD: TargetDataRef);
@@ -1322,6 +1341,11 @@ extern "C" {
                              ElementCount: c_uint,
                              Packed: Bool);
 
+    pub fn LLVMConstNamedStruct(S: TypeRef,
+                                ConstantVals: *const ValueRef,
+                                Count: c_uint)
+                                -> ValueRef;
+
     /// Enables LLVM debug output.
     pub fn LLVMRustSetDebug(Enabled: c_int);
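
The re-added declarations above expose LLVM's target-data queries to Rust. A hedged sketch of how two of them compose, assuming a `TargetDataRef` and `TypeRef` obtained from a live LLVM context; the extern signatures are copied from the block above, and actually running this requires linking against LLVM:

    use libc::{c_uint, c_ulonglong};

    pub enum TargetData_opaque {}
    pub type TargetDataRef = *mut TargetData_opaque;
    pub enum Type_opaque {}
    pub type TypeRef = *mut Type_opaque;

    extern "C" {
        pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong;
        pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint;
    }

    /// ABI size (bytes) and minimum alignment of `ty` for the target.
    pub unsafe fn size_and_align(td: TargetDataRef, ty: TypeRef) -> (u64, u32) {
        (LLVMABISizeOfType(td, ty), LLVMABIAlignmentOfType(td, ty))
    }
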
 
diff --git a/src/librustc_llvm/lib.rs b/src/librustc_llvm/lib.rs
index 592bd620564..5ccce8de706 100644
--- a/src/librustc_llvm/lib.rs
+++ b/src/librustc_llvm/lib.rs
@@ -74,19 +74,22 @@ pub fn AddFunctionAttrStringValue(llfn: ValueRef,
     }
 }
 
+#[repr(C)]
 #[derive(Copy, Clone)]
 pub enum AttributePlace {
-    ReturnValue,
     Argument(u32),
     Function,
 }
 
 impl AttributePlace {
+    pub fn ReturnValue() -> Self {
+        AttributePlace::Argument(0)
+    }
+
     pub fn as_uint(self) -> c_uint {
         match self {
-            AttributePlace::ReturnValue => 0,
-            AttributePlace::Argument(i) => 1 + i,
             AttributePlace::Function => !0,
+            AttributePlace::Argument(i) => i,
         }
     }
 }
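
After this change, `AttributePlace` mirrors LLVM's raw attribute indices: 0 is the return value (so `ReturnValue()` is just `Argument(0)`), parameter indices are passed through unchanged, and `!0` denotes the function itself. A toy check of the mapping:

    #[derive(Copy, Clone)]
    pub enum AttributePlace {
        Argument(u32),
        Function,
    }

    impl AttributePlace {
        #[allow(non_snake_case)]
        pub fn ReturnValue() -> Self {
            AttributePlace::Argument(0)
        }

        pub fn as_uint(self) -> u32 {
            match self {
                AttributePlace::Function => !0,
                AttributePlace::Argument(i) => i,
            }
        }
    }

    fn main() {
        assert_eq!(AttributePlace::ReturnValue().as_uint(), 0);
        assert_eq!(AttributePlace::Argument(1).as_uint(), 1); // first parameter
        assert_eq!(AttributePlace::Function.as_uint(), !0u32);
    }
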
diff --git a/src/librustc_mir/build/matches/simplify.rs b/src/librustc_mir/build/matches/simplify.rs
index a7599f19244..9b3f16f1ab4 100644
--- a/src/librustc_mir/build/matches/simplify.rs
+++ b/src/librustc_mir/build/matches/simplify.rs
@@ -98,16 +98,19 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
             }
 
             PatternKind::Variant { adt_def, substs, variant_index, ref subpatterns } => {
-                let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| {
-                    i == variant_index || {
-                        self.hir.tcx().sess.features.borrow().never_type &&
-                        self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs)
+                if self.hir.tcx().sess.features.borrow().never_type {
+                    let irrefutable = adt_def.variants.iter().enumerate().all(|(i, v)| {
+                        i == variant_index || {
+                            self.hir.tcx().is_variant_uninhabited_from_all_modules(v, substs)
+                        }
+                    });
+                    if irrefutable {
+                        let lvalue = match_pair.lvalue.downcast(adt_def, variant_index);
+                        candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns));
+                        Ok(())
+                    } else {
+                        Err(match_pair)
                     }
-                });
-                if irrefutable {
-                    let lvalue = match_pair.lvalue.downcast(adt_def, variant_index);
-                    candidate.match_pairs.extend(self.field_match_pairs(lvalue, subpatterns));
-                    Ok(())
                 } else {
                     Err(match_pair)
                 }
diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs
index 02a7bc83f6e..1cf35af3a9e 100644
--- a/src/librustc_mir/build/matches/test.rs
+++ b/src/librustc_mir/build/matches/test.rs
@@ -39,7 +39,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
                     span: match_pair.pattern.span,
                     kind: TestKind::Switch {
                         adt_def: adt_def.clone(),
-                        variants: BitVector::new(adt_def.variants.len()),
+                        variants: BitVector::new(self.hir.num_variants(adt_def)),
                     },
                 }
             }
@@ -184,7 +184,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
         match test.kind {
             TestKind::Switch { adt_def, ref variants } => {
                 // Variants is a BitVec of indexes into adt_def.variants.
-                let num_enum_variants = adt_def.variants.len();
+                let num_enum_variants = self.hir.num_variants(adt_def);
                 let used_variants = variants.count();
                 let mut otherwise_block = None;
                 let mut target_blocks = Vec::with_capacity(num_enum_variants);
diff --git a/src/librustc_mir/hair/cx/mod.rs b/src/librustc_mir/hair/cx/mod.rs
index b1f4b849b89..50264238aac 100644
--- a/src/librustc_mir/hair/cx/mod.rs
+++ b/src/librustc_mir/hair/cx/mod.rs
@@ -213,6 +213,10 @@ impl<'a, 'gcx, 'tcx> Cx<'a, 'gcx, 'tcx> {
         bug!("found no method `{}` in `{:?}`", method_name, trait_def_id);
     }
 
+    pub fn num_variants(&mut self, adt_def: &ty::AdtDef) -> usize {
+        adt_def.variants.len()
+    }
+
     pub fn all_fields(&mut self, adt_def: &ty::AdtDef, variant_index: usize) -> Vec<Field> {
         (0..adt_def.variants[variant_index].fields.len())
             .map(Field::new)
diff --git a/src/librustc_mir/transform/deaggregator.rs b/src/librustc_mir/transform/deaggregator.rs
index e2ecd4839fb..61b4716c564 100644
--- a/src/librustc_mir/transform/deaggregator.rs
+++ b/src/librustc_mir/transform/deaggregator.rs
@@ -67,7 +67,7 @@ impl MirPass for Deaggregator {
                     let ty = variant_def.fields[i].ty(tcx, substs);
                     let rhs = Rvalue::Use(op.clone());
 
-                    let lhs_cast = if adt_def.is_enum() {
+                    let lhs_cast = if adt_def.variants.len() > 1 {
                         Lvalue::Projection(Box::new(LvalueProjection {
                             base: lhs.clone(),
                             elem: ProjectionElem::Downcast(adt_def, variant),
@@ -89,7 +89,7 @@ impl MirPass for Deaggregator {
                 }
 
                 // if the aggregate was an enum, we need to set the discriminant
-                if adt_def.is_enum() {
+                if adt_def.variants.len() > 1 {
                     let set_discriminant = Statement {
                         kind: StatementKind::SetDiscriminant {
                             lvalue: lhs.clone(),
diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
index 4b7856f857b..628a8161615 100644
--- a/src/librustc_mir/transform/inline.rs
+++ b/src/librustc_mir/transform/inline.rs
@@ -19,7 +19,6 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec};
 use rustc::mir::*;
 use rustc::mir::visit::*;
 use rustc::ty::{self, Instance, Ty, TyCtxt, TypeFoldable};
-use rustc::ty::layout::LayoutOf;
 use rustc::ty::subst::{Subst,Substs};
 
 use std::collections::VecDeque;
@@ -626,7 +625,9 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
 fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                           param_env: ty::ParamEnv<'tcx>,
                           ty: Ty<'tcx>) -> Option<u64> {
-    (tcx, param_env).layout_of(ty).ok().map(|layout| layout.size.bytes())
+    ty.layout(tcx, param_env).ok().map(|layout| {
+        layout.size(&tcx.data_layout).bytes()
+    })
 }
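
`type_size_of` above collapses the `Result` from the layout query into an `Option<u64>`, so the inliner treats a layout error the same as an unknown size. The same shape in miniature, with a toy layout function:

    struct Layout { size_bytes: u64 }

    fn layout_of(ty_known: bool) -> Result<Layout, String> {
        if ty_known {
            Ok(Layout { size_bytes: 8 })
        } else {
            Err("layout error".to_string())
        }
    }

    fn type_size_of(ty_known: bool) -> Option<u64> {
        // ok() discards the error; map extracts the size in bytes.
        layout_of(ty_known).ok().map(|layout| layout.size_bytes)
    }

    fn main() {
        assert_eq!(type_size_of(true), Some(8));
        assert_eq!(type_size_of(false), None);
    }
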
 
 fn subst_and_normalize<'a, 'tcx: 'a>(
diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs
index cc6b7020903..b70d0fb9c2c 100644
--- a/src/librustc_mir/transform/type_check.rs
+++ b/src/librustc_mir/transform/type_check.rs
@@ -344,7 +344,7 @@ impl<'a, 'b, 'gcx, 'tcx> TypeVerifier<'a, 'b, 'gcx, 'tcx> {
                 variant_index,
             } => (&adt_def.variants[variant_index], substs),
             LvalueTy::Ty { ty } => match ty.sty {
-                ty::TyAdt(adt_def, substs) if !adt_def.is_enum() => {
+                ty::TyAdt(adt_def, substs) if adt_def.is_univariant() => {
                     (&adt_def.variants[0], substs)
                 }
                 ty::TyClosure(def_id, substs) => {
diff --git a/src/librustc_mir/util/elaborate_drops.rs b/src/librustc_mir/util/elaborate_drops.rs
index 1852712a083..3b9772079ad 100644
--- a/src/librustc_mir/util/elaborate_drops.rs
+++ b/src/librustc_mir/util/elaborate_drops.rs
@@ -384,7 +384,7 @@ impl<'l, 'b, 'tcx, D> DropCtxt<'l, 'b, 'tcx, D>
                                   substs: &'tcx Substs<'tcx>)
                                   -> (BasicBlock, Unwind) {
         let (succ, unwind) = self.drop_ladder_bottom();
-        if !adt.is_enum() {
+        if adt.variants.len() == 1 {
             let fields = self.move_paths_for_fields(
                 self.lvalue,
                 self.path,
diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index 54828044de6..6df40c34ec5 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -11,7 +11,7 @@
 use llvm::{self, ValueRef, AttributePlace};
 use base;
 use builder::Builder;
-use common::{instance_ty, ty_fn_sig, C_usize};
+use common::{instance_ty, ty_fn_sig, type_is_fat_ptr, C_usize};
 use context::CrateContext;
 use cabi_x86;
 use cabi_x86_64;
@@ -30,34 +30,31 @@ use cabi_sparc64;
 use cabi_nvptx;
 use cabi_nvptx64;
 use cabi_hexagon;
-use mir::lvalue::{Alignment, LvalueRef};
-use mir::operand::OperandValue;
+use machine::llalign_of_min;
 use type_::Type;
-use type_of::{LayoutLlvmExt, PointerKind};
+use type_of;
 
+use rustc::hir;
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, Size, TyLayout};
-use rustc::ty::layout::{HasDataLayout, LayoutOf};
+use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size};
+use rustc_back::PanicStrategy;
 
 use libc::c_uint;
-use std::{cmp, iter};
+use std::cmp;
+use std::iter;
 
 pub use syntax::abi::Abi;
 pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
 
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
-pub enum PassMode {
-    /// Ignore the argument (useful for empty struct).
+#[derive(Clone, Copy, PartialEq, Debug)]
+enum ArgKind {
+    /// Pass the argument directly using the normal converted
+    /// LLVM type or by coercing to another specified type
+    Direct,
+    /// Pass the argument indirectly via a hidden pointer
+    Indirect,
+    /// Ignore the argument (useful for empty struct)
     Ignore,
-    /// Pass the argument directly.
-    Direct(ArgAttributes),
-    /// Pass a pair's elements directly in two arguments.
-    Pair(ArgAttributes, ArgAttributes),
-    /// Pass the argument after casting it, to either
-    /// a single uniform or a pair of registers.
-    Cast(CastTarget),
-    /// Pass the argument indirectly via a hidden pointer.
-    Indirect(ArgAttributes),
 }
 
 // Hack to disable non_upper_case_globals only for the bitflags! and not for the rest
@@ -99,78 +96,46 @@ impl ArgAttribute {
 
 /// A compact representation of LLVM attributes (at least those relevant for this module)
 /// that can be manipulated without interacting with LLVM's Attribute machinery.
-#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+#[derive(Copy, Clone, Debug, Default)]
 pub struct ArgAttributes {
     regular: ArgAttribute,
-    pointee_size: Size,
-    pointee_align: Option<Align>
+    dereferenceable_bytes: u64,
 }
 
 impl ArgAttributes {
-    fn new() -> Self {
-        ArgAttributes {
-            regular: ArgAttribute::default(),
-            pointee_size: Size::from_bytes(0),
-            pointee_align: None,
-        }
-    }
-
     pub fn set(&mut self, attr: ArgAttribute) -> &mut Self {
         self.regular = self.regular | attr;
         self
     }
 
+    pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
+        self.dereferenceable_bytes = bytes;
+        self
+    }
+
     pub fn contains(&self, attr: ArgAttribute) -> bool {
         self.regular.contains(attr)
     }
 
     pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) {
-        let mut regular = self.regular;
         unsafe {
-            let deref = self.pointee_size.bytes();
-            if deref != 0 {
-                if regular.contains(ArgAttribute::NonNull) {
-                    llvm::LLVMRustAddDereferenceableAttr(llfn,
-                                                         idx.as_uint(),
-                                                         deref);
-                } else {
-                    llvm::LLVMRustAddDereferenceableOrNullAttr(llfn,
-                                                               idx.as_uint(),
-                                                               deref);
-                }
-                regular -= ArgAttribute::NonNull;
+            self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
+            if self.dereferenceable_bytes != 0 {
+                llvm::LLVMRustAddDereferenceableAttr(llfn,
+                                                     idx.as_uint(),
+                                                     self.dereferenceable_bytes);
             }
-            if let Some(align) = self.pointee_align {
-                llvm::LLVMRustAddAlignmentAttr(llfn,
-                                               idx.as_uint(),
-                                               align.abi() as u32);
-            }
-            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
         }
     }
 
     pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) {
-        let mut regular = self.regular;
         unsafe {
-            let deref = self.pointee_size.bytes();
-            if deref != 0 {
-                if regular.contains(ArgAttribute::NonNull) {
-                    llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
-                                                                 idx.as_uint(),
-                                                                 deref);
-                } else {
-                    llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite,
-                                                                       idx.as_uint(),
-                                                                       deref);
-                }
-                regular -= ArgAttribute::NonNull;
-            }
-            if let Some(align) = self.pointee_align {
-                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
-                                                       idx.as_uint(),
-                                                       align.abi() as u32);
+            self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
+            if self.dereferenceable_bytes != 0 {
+                llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite,
+                                                             idx.as_uint(),
+                                                             self.dereferenceable_bytes);
             }
-            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
         }
     }
 }
@@ -209,32 +174,7 @@ impl Reg {
 }
 
 impl Reg {
-    pub fn align(&self, ccx: &CrateContext) -> Align {
-        let dl = ccx.data_layout();
-        match self.kind {
-            RegKind::Integer => {
-                match self.size.bits() {
-                    1 => dl.i1_align,
-                    2...8 => dl.i8_align,
-                    9...16 => dl.i16_align,
-                    17...32 => dl.i32_align,
-                    33...64 => dl.i64_align,
-                    65...128 => dl.i128_align,
-                    _ => bug!("unsupported integer: {:?}", self)
-                }
-            }
-            RegKind::Float => {
-                match self.size.bits() {
-                    32 => dl.f32_align,
-                    64 => dl.f64_align,
-                    _ => bug!("unsupported float: {:?}", self)
-                }
-            }
-            RegKind::Vector => dl.vector_align(self.size)
-        }
-    }
-
-    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
+    fn llvm_type(&self, ccx: &CrateContext) -> Type {
         match self.kind {
             RegKind::Integer => Type::ix(ccx, self.size.bits()),
             RegKind::Float => {
@@ -253,7 +193,7 @@ impl Reg {
 
 /// An argument passed entirely in registers with the
 /// same kind (e.g. HFA / HVA on PPC64 and AArch64).
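 /// For instance, an all-`f32` struct on AArch64 (a homogeneous float
 /// aggregate) can be described as a `Uniform` with an `f32` unit and the
 /// struct's total size.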
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[derive(Copy, Clone)]
 pub struct Uniform {
     pub unit: Reg,
 
@@ -276,11 +216,7 @@ impl From<Reg> for Uniform {
 }
 
 impl Uniform {
-    pub fn align(&self, ccx: &CrateContext) -> Align {
-        self.unit.align(ccx)
-    }
-
-    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
+    fn llvm_type(&self, ccx: &CrateContext) -> Type {
         let llunit = self.unit.llvm_type(ccx);
 
         if self.total <= self.unit.size {
@@ -312,62 +248,106 @@ pub trait LayoutExt<'tcx> {
 
 impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
     fn is_aggregate(&self) -> bool {
-        match self.abi {
-            layout::Abi::Uninhabited |
-            layout::Abi::Scalar(_) |
-            layout::Abi::Vector => false,
-            layout::Abi::ScalarPair(..) |
-            layout::Abi::Aggregate { .. } => true
+        match *self.layout {
+            Layout::Scalar { .. } |
+            Layout::RawNullablePointer { .. } |
+            Layout::CEnum { .. } |
+            Layout::Vector { .. } => false,
+
+            Layout::Array { .. } |
+            Layout::FatPointer { .. } |
+            Layout::Univariant { .. } |
+            Layout::UntaggedUnion { .. } |
+            Layout::General { .. } |
+            Layout::StructWrappedNullablePointer { .. } => true
         }
     }
 
     fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
-        match self.abi {
-            layout::Abi::Uninhabited => None,
-
-            // The primitive for this algorithm.
-            layout::Abi::Scalar(ref scalar) => {
-                let kind = match scalar.value {
-                    layout::Int(..) |
+        match *self.layout {
+            // The primitives for this algorithm.
+            Layout::Scalar { value, .. } |
+            Layout::RawNullablePointer { value, .. } => {
+                let kind = match value {
+                    layout::Int(_) |
                     layout::Pointer => RegKind::Integer,
                     layout::F32 |
                     layout::F64 => RegKind::Float
                 };
                 Some(Reg {
                     kind,
-                    size: self.size
+                    size: self.size(ccx)
                 })
             }
 
-            layout::Abi::Vector => {
+            Layout::CEnum { .. } => {
+                Some(Reg {
+                    kind: RegKind::Integer,
+                    size: self.size(ccx)
+                })
+            }
+
+            Layout::Vector { .. } => {
                 Some(Reg {
                     kind: RegKind::Vector,
-                    size: self.size
+                    size: self.size(ccx)
                 })
             }
 
-            layout::Abi::ScalarPair(..) |
-            layout::Abi::Aggregate { .. } => {
-                let mut total = Size::from_bytes(0);
+            Layout::Array { count, .. } => {
+                if count > 0 {
+                    self.field(ccx, 0).homogeneous_aggregate(ccx)
+                } else {
+                    None
+                }
+            }
+
+            Layout::Univariant { ref variant, .. } => {
+                let mut unaligned_offset = Size::from_bytes(0);
                 let mut result = None;
 
-                let is_union = match self.fields {
-                    layout::FieldPlacement::Array { count, .. } => {
-                        if count > 0 {
-                            return self.field(ccx, 0).homogeneous_aggregate(ccx);
-                        } else {
-                            return None;
+                for i in 0..self.field_count() {
+                    if unaligned_offset != variant.offsets[i] {
+                        return None;
+                    }
+
+                    let field = self.field(ccx, i);
+                    match (result, field.homogeneous_aggregate(ccx)) {
+                        // The field itself must be a homogeneous aggregate.
+                        (_, None) => return None,
+                        // If this is the first field, record the unit.
+                        (None, Some(unit)) => {
+                            result = Some(unit);
+                        }
+                        // For all following fields, the unit must be the same.
+                        (Some(prev_unit), Some(unit)) => {
+                            if prev_unit != unit {
+                                return None;
+                            }
                         }
                     }
-                    layout::FieldPlacement::Union(_) => true,
-                    layout::FieldPlacement::Arbitrary { .. } => false
-                };
 
-                for i in 0..self.fields.count() {
-                    if !is_union && total != self.fields.offset(i) {
-                        return None;
+                    // Keep track of the offset (without padding).
+                    let size = field.size(ccx);
+                    match unaligned_offset.checked_add(size, ccx) {
+                        Some(offset) => unaligned_offset = offset,
+                        None => return None
                     }
+                }
+
+                // There needs to be no padding.
+                if unaligned_offset != self.size(ccx) {
+                    None
+                } else {
+                    result
+                }
+            }
 
+            Layout::UntaggedUnion { .. } => {
+                let mut max = Size::from_bytes(0);
+                let mut result = None;
+
+                for i in 0..self.field_count() {
                     let field = self.field(ccx, i);
                     match (result, field.homogeneous_aggregate(ccx)) {
                         // The field itself must be a homogeneous aggregate.
@@ -385,26 +365,28 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
                     }
 
                     // Keep track of the offset (without padding).
-                    let size = field.size;
-                    if is_union {
-                        total = cmp::max(total, size);
-                    } else {
-                        total += size;
+                    let size = field.size(ccx);
+                    if size > max {
+                        max = size;
                     }
                 }
 
                 // There needs to be no padding.
-                if total != self.size {
+                if max != self.size(ccx) {
                     None
                 } else {
                     result
                 }
             }
+
+            // Rust-specific types, which we can ignore for C ABIs.
+            Layout::FatPointer { .. } |
+            Layout::General { .. } |
+            Layout::StructWrappedNullablePointer { .. } => None
         }
     }
 }
 
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub enum CastTarget {
     Uniform(Uniform),
     Pair(Reg, Reg)
@@ -423,28 +405,7 @@ impl From<Uniform> for CastTarget {
 }
 
 impl CastTarget {
-    pub fn size(&self, ccx: &CrateContext) -> Size {
-        match *self {
-            CastTarget::Uniform(u) => u.total,
-            CastTarget::Pair(a, b) => {
-                (a.size.abi_align(a.align(ccx)) + b.size)
-                    .abi_align(self.align(ccx))
-            }
-        }
-    }
-
-    pub fn align(&self, ccx: &CrateContext) -> Align {
-        match *self {
-            CastTarget::Uniform(u) => u.align(ccx),
-            CastTarget::Pair(a, b) => {
-                ccx.data_layout().aggregate_align
-                    .max(a.align(ccx))
-                    .max(b.align(ccx))
-            }
-        }
-    }
-
-    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
+    fn llvm_type(&self, ccx: &CrateContext) -> Type {
         match *self {
             CastTarget::Uniform(u) => u.llvm_type(ccx),
             CastTarget::Pair(a, b) => {
@@ -457,118 +418,131 @@ impl CastTarget {
     }
 }
 
-/// Information about how to pass an argument to,
-/// or return a value from, a function, under some ABI.
-#[derive(Debug)]
+/// Information about how a specific C type
+/// should be passed to or returned from a function.
+///
+/// This is borrowed from clang's ABIInfo.h.
+#[derive(Clone, Copy, Debug)]
 pub struct ArgType<'tcx> {
+    kind: ArgKind,
     pub layout: TyLayout<'tcx>,
-
-    /// Dummy argument, which is emitted before the real argument.
-    pub pad: Option<Reg>,
-
-    pub mode: PassMode,
+    /// Coerced LLVM Type
+    pub cast: Option<Type>,
+    /// Dummy argument, which is emitted before the real argument
+    pub pad: Option<Type>,
+    /// LLVM attributes of argument
+    pub attrs: ArgAttributes
 }
 
 impl<'a, 'tcx> ArgType<'tcx> {
     fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
         ArgType {
+            kind: ArgKind::Direct,
             layout,
+            cast: None,
             pad: None,
-            mode: PassMode::Direct(ArgAttributes::new()),
+            attrs: ArgAttributes::default()
         }
     }
 
-    pub fn make_indirect(&mut self) {
-        assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
+    pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) {
+        assert_eq!(self.kind, ArgKind::Direct);
+
+        // Wipe old attributes, likely not valid through indirection.
+        self.attrs = ArgAttributes::default();
 
-        // Start with fresh attributes for the pointer.
-        let mut attrs = ArgAttributes::new();
+        let llarg_sz = self.layout.size(ccx).bytes();
 
         // For non-immediate arguments the callee gets its own copy of
         // the value on the stack, so there are no aliases. It's also
         // program-invisible, so it can't possibly be captured.
-        attrs.set(ArgAttribute::NoAlias)
-             .set(ArgAttribute::NoCapture)
-             .set(ArgAttribute::NonNull);
-        attrs.pointee_size = self.layout.size;
-        // FIXME(eddyb) We should be doing this, but at least on
-        // i686-pc-windows-msvc, it results in wrong stack offsets.
-        // attrs.pointee_align = Some(self.layout.align);
-
-        self.mode = PassMode::Indirect(attrs);
+        self.attrs.set(ArgAttribute::NoAlias)
+                  .set(ArgAttribute::NoCapture)
+                  .set_dereferenceable(llarg_sz);
+
+        self.kind = ArgKind::Indirect;
     }
 
-    pub fn make_indirect_byval(&mut self) {
-        self.make_indirect();
-        match self.mode {
-            PassMode::Indirect(ref mut attrs) => {
-                attrs.set(ArgAttribute::ByVal);
-            }
-            _ => bug!()
-        }
+    pub fn ignore(&mut self) {
+        assert_eq!(self.kind, ArgKind::Direct);
+        self.kind = ArgKind::Ignore;
     }
 
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
-            if let layout::Int(i, signed) = scalar.value {
-                if i.size().bits() < bits {
-                    if let PassMode::Direct(ref mut attrs) = self.mode {
-                        attrs.set(if signed {
-                            ArgAttribute::SExt
+        let (i, signed) = match *self.layout {
+            Layout::Scalar { value, .. } => {
+                match value {
+                    layout::Int(i) => {
+                        if self.layout.ty.is_integral() {
+                            (i, self.layout.ty.is_signed())
                         } else {
-                            ArgAttribute::ZExt
-                        });
+                            return;
+                        }
                     }
+                    _ => return
                 }
             }
+
+            // Rust enum types that map onto C enums also need to follow
+            // the target ABI zero-/sign-extension rules.
+            Layout::CEnum { discr, signed, .. } => (discr, signed),
+
+            _ => return
+        };
+
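+        // e.g. widening an `i16` argument to the target's 32-bit integer
+        // width: a signed source gets `sext`, an unsigned one gets `zext`.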
+        if i.size().bits() < bits {
+            self.attrs.set(if signed {
+                ArgAttribute::SExt
+            } else {
+                ArgAttribute::ZExt
+            });
         }
     }
 
-    pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) {
-        assert_eq!(self.mode, PassMode::Direct(ArgAttributes::new()));
-        self.mode = PassMode::Cast(target.into());
+    pub fn cast_to<T: Into<CastTarget>>(&mut self, ccx: &CrateContext, target: T) {
+        self.cast = Some(target.into().llvm_type(ccx));
     }
 
-    pub fn pad_with(&mut self, reg: Reg) {
-        self.pad = Some(reg);
+    pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) {
+        self.pad = Some(reg.llvm_type(ccx));
     }
 
     pub fn is_indirect(&self) -> bool {
-        match self.mode {
-            PassMode::Indirect(_) => true,
-            _ => false
-        }
+        self.kind == ArgKind::Indirect
     }
 
     pub fn is_ignore(&self) -> bool {
-        self.mode == PassMode::Ignore
+        self.kind == ArgKind::Ignore
     }
 
     /// Get the LLVM type for an lvalue of the original Rust type of
     /// this argument/return, i.e. the result of `type_of::type_of`.
     pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
-        self.layout.llvm_type(ccx)
+        type_of::type_of(ccx, self.layout.ty)
     }
 
     /// Store a direct/indirect value described by this ArgType into a
     /// lvalue for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    pub fn store(&self, bcx: &Builder<'a, 'tcx>, val: ValueRef, dst: LvalueRef<'tcx>) {
+    pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) {
         if self.is_ignore() {
             return;
         }
         let ccx = bcx.ccx;
         if self.is_indirect() {
-            OperandValue::Ref(val, Alignment::AbiAligned).store(bcx, dst)
-        } else if let PassMode::Cast(cast) = self.mode {
+            let llsz = C_usize(ccx, self.layout.size(ccx).bytes());
+            let llalign = self.layout.align(ccx).abi();
+            base::call_memcpy(bcx, dst, val, llsz, llalign as u32);
+        } else if let Some(ty) = self.cast {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_dst = bcx.pointercast(dst.llval, cast.llvm_type(ccx).ptr_to());
-                bcx.store(val, cast_dst, Some(self.layout.align));
+                let cast_dst = bcx.pointercast(dst, ty.ptr_to());
+                let llalign = self.layout.align(ccx).abi();
+                bcx.store(val, cast_dst, Some(llalign as u32));
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type.  The
@@ -585,45 +559,40 @@ impl<'a, 'tcx> ArgType<'tcx> {
                 //   bitcasting to the struct type yields invalid cast errors.
 
                 // We instead thus allocate some scratch space...
-                let llscratch = bcx.alloca(cast.llvm_type(ccx), "abi_cast", cast.align(ccx));
-                let scratch_size = cast.size(ccx);
-                bcx.lifetime_start(llscratch, scratch_size);
+                let llscratch = bcx.alloca(ty, "abi_cast", None);
+                base::Lifetime::Start.call(bcx, llscratch);
 
                 // ...where we first store the value...
                 bcx.store(val, llscratch, None);
 
                 // ...and then memcpy it to the intended destination.
                 base::call_memcpy(bcx,
-                                  bcx.pointercast(dst.llval, Type::i8p(ccx)),
+                                  bcx.pointercast(dst, Type::i8p(ccx)),
                                   bcx.pointercast(llscratch, Type::i8p(ccx)),
-                                  C_usize(ccx, self.layout.size.bytes()),
-                                  self.layout.align.min(cast.align(ccx)));
+                                  C_usize(ccx, self.layout.size(ccx).bytes()),
+                                  cmp::min(self.layout.align(ccx).abi() as u32,
+                                           llalign_of_min(ccx, ty)));
 
-                bcx.lifetime_end(llscratch, scratch_size);
+                base::Lifetime::End.call(bcx, llscratch);
             }
         } else {
-            OperandValue::Immediate(val).store(bcx, dst);
+            if self.layout.ty == ccx.tcx().types.bool {
+                val = bcx.zext(val, Type::i8(ccx));
+            }
+            bcx.store(val, dst, None);
         }
     }
 
-    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: LvalueRef<'tcx>) {
+    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) {
         if self.pad.is_some() {
             *idx += 1;
         }
-        let mut next = || {
-            let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
-            *idx += 1;
-            val
-        };
-        match self.mode {
-            PassMode::Ignore => {},
-            PassMode::Pair(..) => {
-                OperandValue::Pair(next(), next()).store(bcx, dst);
-            }
-            PassMode::Direct(_) | PassMode::Indirect(_) | PassMode::Cast(_) => {
-                self.store(bcx, next(), dst);
-            }
+        if self.is_ignore() {
+            return;
         }
+        let val = llvm::get_param(bcx.llfn(), *idx as c_uint);
+        *idx += 1;
+        self.store(bcx, val, dst);
     }
 }
 
@@ -632,7 +601,7 @@ impl<'a, 'tcx> ArgType<'tcx> {
 ///
 /// I will do my best to describe this structure, but these
 /// comments are reverse-engineered and may be inaccurate. -NDM
-#[derive(Debug)]
+#[derive(Clone, Debug)]
 pub struct FnType<'tcx> {
     /// The LLVM types of each argument.
     pub args: Vec<ArgType<'tcx>>,
@@ -651,14 +620,14 @@ impl<'a, 'tcx> FnType<'tcx> {
         let fn_ty = instance_ty(ccx.tcx(), &instance);
         let sig = ty_fn_sig(ccx, fn_ty);
         let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
-        FnType::new(ccx, sig, &[])
+        Self::new(ccx, sig, &[])
     }
 
     pub fn new(ccx: &CrateContext<'a, 'tcx>,
                sig: ty::FnSig<'tcx>,
                extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
-        fn_ty.adjust_for_abi(ccx, sig.abi);
+        fn_ty.adjust_for_abi(ccx, sig);
         fn_ty
     }
 
@@ -667,23 +636,8 @@ impl<'a, 'tcx> FnType<'tcx> {
                       extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
         // Don't pass the vtable, it's not an argument of the virtual fn.
-        {
-            let self_arg = &mut fn_ty.args[0];
-            match self_arg.mode {
-                PassMode::Pair(data_ptr, _) => {
-                    self_arg.mode = PassMode::Direct(data_ptr);
-                }
-                _ => bug!("FnType::new_vtable: non-pair self {:?}", self_arg)
-            }
-
-            let pointee = self_arg.layout.ty.builtin_deref(true, ty::NoPreference)
-                .unwrap_or_else(|| {
-                    bug!("FnType::new_vtable: non-pointer self {:?}", self_arg)
-                }).ty;
-            let fat_ptr_ty = ccx.tcx().mk_mut_ptr(pointee);
-            self_arg.layout = ccx.layout_of(fat_ptr_ty).field(ccx, 0);
-        }
-        fn_ty.adjust_for_abi(ccx, sig.abi);
+        fn_ty.args[1].ignore();
+        fn_ty.adjust_for_abi(ccx, sig);
         fn_ty
     }
 
@@ -748,113 +702,120 @@ impl<'a, 'tcx> FnType<'tcx> {
             _ => false
         };
 
-        // Handle safe Rust thin and fat pointers.
-        let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
-                                      scalar: &layout::Scalar,
-                                      layout: TyLayout<'tcx>,
-                                      offset: Size,
-                                      is_return: bool| {
-            // Booleans are always an i1 that needs to be zero-extended.
-            if scalar.is_bool() {
-                attrs.set(ArgAttribute::ZExt);
-                return;
+        let arg_of = |ty: Ty<'tcx>, is_return: bool| {
+            let mut arg = ArgType::new(ccx.layout_of(ty));
+            if ty.is_bool() {
+                arg.attrs.set(ArgAttribute::ZExt);
+            } else {
+                if arg.layout.size(ccx).bytes() == 0 {
+                    // For some forsaken reason, x86_64-pc-windows-gnu
+                    // doesn't ignore zero-sized struct arguments.
+                    // The same is true for s390x-unknown-linux-gnu.
+                    if is_return || rust_abi ||
+                       (!win_x64_gnu && !linux_s390x) {
+                        arg.ignore();
+                    }
+                }
             }
+            arg
+        };
+
+        let ret_ty = sig.output();
+        let mut ret = arg_of(ret_ty, true);
 
-            // Only pointer types handled below.
-            if scalar.value != layout::Pointer {
-                return;
+        if !type_is_fat_ptr(ccx, ret_ty) {
+            // The `noalias` attribute on the return value is useful to a
+            // function ptr caller.
+            if ret_ty.is_box() {
+                // `Box` pointer return values never alias because ownership
+                // is transferred
+                ret.attrs.set(ArgAttribute::NoAlias);
             }
 
-            if scalar.valid_range.start < scalar.valid_range.end {
-                if scalar.valid_range.start > 0 {
-                    attrs.set(ArgAttribute::NonNull);
+            // We can also mark the return value as `dereferenceable` in certain cases
+            match ret_ty.sty {
+                // These are not really pointers but pairs, (pointer, len)
+                ty::TyRef(_, ty::TypeAndMut { ty, .. }) => {
+                    ret.attrs.set_dereferenceable(ccx.size_of(ty));
+                }
+                ty::TyAdt(def, _) if def.is_box() => {
+                    ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty()));
                 }
+                _ => {}
             }
+        }
 
-            if let Some(pointee) = layout.pointee_info_at(ccx, offset) {
-                if let Some(kind) = pointee.safe {
-                    attrs.pointee_size = pointee.size;
-                    attrs.pointee_align = Some(pointee.align);
+        let mut args = Vec::with_capacity(inputs.len() + extra_args.len());
 
-                    // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions
-                    // with align attributes, and those calls later block optimizations.
-                    if !is_return {
-                        attrs.pointee_align = None;
-                    }
+        // Handle safe Rust thin and fat pointers.
+        let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty {
+            // `Box` pointer parameters never alias because ownership is transferred
+            ty::TyAdt(def, _) if def.is_box() => {
+                arg.attrs.set(ArgAttribute::NoAlias);
+                Some(ty.boxed_ty())
+            }
 
-                    // `Box` pointer parameters never alias because ownership is transferred
-                    // `&mut` pointer parameters never alias other parameters,
-                    // or mutable global data
-                    //
-                    // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
-                    // and can be marked as both `readonly` and `noalias`, as
-                    // LLVM's definition of `noalias` is based solely on memory
-                    // dependencies rather than pointer equality
-                    let no_alias = match kind {
-                        PointerKind::Shared => false,
-                        PointerKind::Frozen | PointerKind::UniqueOwned => true,
-                        PointerKind::UniqueBorrowed => !is_return
+            ty::TyRef(_, mt) => {
+                // `&mut` pointer parameters never alias other parameters, or mutable global data
+                //
+                // `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
+                // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
+                // on memory dependencies rather than pointer equality
+                let is_freeze = ccx.shared().type_is_freeze(mt.ty);
+
+                let no_alias_is_safe =
+                    if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias ||
+                       ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort {
+                        // Mutable references or immutable shared references
+                        mt.mutbl == hir::MutMutable || is_freeze
+                    } else {
+                        // Only immutable shared references
+                        mt.mutbl != hir::MutMutable && is_freeze
                     };
-                    if no_alias {
-                        attrs.set(ArgAttribute::NoAlias);
-                    }
 
-                    if kind == PointerKind::Frozen && !is_return {
-                        attrs.set(ArgAttribute::ReadOnly);
-                    }
+                if no_alias_is_safe {
+                    arg.attrs.set(ArgAttribute::NoAlias);
                 }
-            }
-        };
 
-        let arg_of = |ty: Ty<'tcx>, is_return: bool| {
-            let mut arg = ArgType::new(ccx.layout_of(ty));
-            if arg.layout.is_zst() {
-                // For some forsaken reason, x86_64-pc-windows-gnu
-                // doesn't ignore zero-sized struct arguments.
-                // The same is true for s390x-unknown-linux-gnu.
-                if is_return || rust_abi || (!win_x64_gnu && !linux_s390x) {
-                    arg.mode = PassMode::Ignore;
+                if mt.mutbl == hir::MutImmutable && is_freeze {
+                    arg.attrs.set(ArgAttribute::ReadOnly);
                 }
-            }
 
-            // FIXME(eddyb) other ABIs don't have logic for scalar pairs.
-            if !is_return && rust_abi {
-                if let layout::Abi::ScalarPair(ref a, ref b) = arg.layout.abi {
-                    let mut a_attrs = ArgAttributes::new();
-                    let mut b_attrs = ArgAttributes::new();
-                    adjust_for_rust_scalar(&mut a_attrs,
-                                           a,
-                                           arg.layout,
-                                           Size::from_bytes(0),
-                                           false);
-                    adjust_for_rust_scalar(&mut b_attrs,
-                                           b,
-                                           arg.layout,
-                                           a.value.size(ccx).abi_align(b.value.align(ccx)),
-                                           false);
-                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
-                    return arg;
-                }
+                Some(mt.ty)
             }
+            _ => None
+        };
 
-            if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
-                if let PassMode::Direct(ref mut attrs) = arg.mode {
-                    adjust_for_rust_scalar(attrs,
-                                           scalar,
-                                           arg.layout,
-                                           Size::from_bytes(0),
-                                           is_return);
+        for ty in inputs.iter().chain(extra_args.iter()) {
+            let mut arg = arg_of(ty, false);
+
+            if let ty::layout::FatPointer { .. } = *arg.layout {
+                let mut data = ArgType::new(arg.layout.field(ccx, 0));
+                let mut info = ArgType::new(arg.layout.field(ccx, 1));
+
+                if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
+                    data.attrs.set(ArgAttribute::NonNull);
+                    if ccx.tcx().struct_tail(inner).is_trait() {
+                        // vtables can be safely marked non-null, readonly
+                        // and noalias.
+                        info.attrs.set(ArgAttribute::NonNull);
+                        info.attrs.set(ArgAttribute::ReadOnly);
+                        info.attrs.set(ArgAttribute::NoAlias);
+                    }
+                }
+                args.push(data);
+                args.push(info);
+            } else {
+                if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
+                    arg.attrs.set_dereferenceable(ccx.size_of(inner));
                 }
+                args.push(arg);
             }
-
-            arg
-        };
+        }
 
         FnType {
-            ret: arg_of(sig.output(), true),
-            args: inputs.iter().chain(extra_args.iter()).map(|ty| {
-                arg_of(ty, false)
-            }).collect(),
+            args,
+            ret,
             variadic: sig.variadic,
             cconv,
         }
@@ -862,38 +823,63 @@ impl<'a, 'tcx> FnType<'tcx> {
 
     fn adjust_for_abi(&mut self,
                       ccx: &CrateContext<'a, 'tcx>,
-                      abi: Abi) {
+                      sig: ty::FnSig<'tcx>) {
+        let abi = sig.abi;
         if abi == Abi::Unadjusted { return }
 
         if abi == Abi::Rust || abi == Abi::RustCall ||
            abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
             let fixup = |arg: &mut ArgType<'tcx>| {
-                if arg.is_ignore() { return; }
+                if !arg.layout.is_aggregate() {
+                    return;
+                }
 
-                match arg.layout.abi {
-                    layout::Abi::Aggregate { .. } => {}
-                    _ => return
+                let size = arg.layout.size(ccx);
+
+                if let Some(unit) = arg.layout.homogeneous_aggregate(ccx) {
+                    // Replace newtypes with their inner-most type.
+                    if unit.size == size {
+                        // Needs a cast as we've unpacked a newtype.
+                        arg.cast_to(ccx, unit);
+                        return;
+                    }
+
+                    // Pairs of floats.
+                    if unit.kind == RegKind::Float {
+                        if unit.size.checked_mul(2, ccx) == Some(size) {
+                            // FIXME(eddyb) This should be using Uniform instead of a pair,
+                            // but the resulting [2 x float/double] breaks emscripten.
+                            // See https://github.com/kripken/emscripten-fastcomp/issues/178.
+                            arg.cast_to(ccx, CastTarget::Pair(unit, unit));
+                            return;
+                        }
+                    }
                 }
 
-                let size = arg.layout.size;
                 if size > layout::Pointer.size(ccx) {
-                    arg.make_indirect();
+                    arg.make_indirect(ccx);
                 } else {
                     // We want to pass small aggregates as immediates, but using
                     // a LLVM aggregate type for this leads to bad optimizations,
                     // so we pick an appropriately sized integer type instead.
-                    arg.cast_to(Reg {
+                    arg.cast_to(ccx, Reg {
                         kind: RegKind::Integer,
                         size
                     });
                 }
             };
-            fixup(&mut self.ret);
+            // Fat pointers are returned by-value.
+            if !self.ret.is_ignore() {
+                if !type_is_fat_ptr(ccx, sig.output()) {
+                    fixup(&mut self.ret);
+                }
+            }
             for arg in &mut self.args {
+                if arg.is_ignore() { continue; }
                 fixup(arg);
             }
-            if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
-                attrs.set(ArgAttribute::StructRet);
+            if self.ret.is_indirect() {
+                self.ret.attrs.set(ArgAttribute::StructRet);
             }
             return;
         }
@@ -910,7 +896,7 @@ impl<'a, 'tcx> FnType<'tcx> {
             "x86_64" => if abi == Abi::SysV64 {
                 cabi_x86_64::compute_abi_info(ccx, self);
             } else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows {
-                cabi_x86_win64::compute_abi_info(self);
+                cabi_x86_win64::compute_abi_info(ccx, self);
             } else {
                 cabi_x86_64::compute_abi_info(ccx, self);
             },
@@ -923,52 +909,51 @@ impl<'a, 'tcx> FnType<'tcx> {
             "s390x" => cabi_s390x::compute_abi_info(ccx, self),
             "asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
             "wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
-            "msp430" => cabi_msp430::compute_abi_info(self),
+            "msp430" => cabi_msp430::compute_abi_info(ccx, self),
             "sparc" => cabi_sparc::compute_abi_info(ccx, self),
             "sparc64" => cabi_sparc64::compute_abi_info(ccx, self),
-            "nvptx" => cabi_nvptx::compute_abi_info(self),
-            "nvptx64" => cabi_nvptx64::compute_abi_info(self),
-            "hexagon" => cabi_hexagon::compute_abi_info(self),
+            "nvptx" => cabi_nvptx::compute_abi_info(ccx, self),
+            "nvptx64" => cabi_nvptx64::compute_abi_info(ccx, self),
+            "hexagon" => cabi_hexagon::compute_abi_info(ccx, self),
             a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
         }
 
-        if let PassMode::Indirect(ref mut attrs) = self.ret.mode {
-            attrs.set(ArgAttribute::StructRet);
+        if self.ret.is_indirect() {
+            self.ret.attrs.set(ArgAttribute::StructRet);
         }
     }
 
     pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
         let mut llargument_tys = Vec::new();
 
-        let llreturn_ty = match self.ret.mode {
-            PassMode::Ignore => Type::void(ccx),
-            PassMode::Direct(_) | PassMode::Pair(..) => {
-                self.ret.layout.immediate_llvm_type(ccx)
-            }
-            PassMode::Cast(cast) => cast.llvm_type(ccx),
-            PassMode::Indirect(_) => {
-                llargument_tys.push(self.ret.memory_ty(ccx).ptr_to());
-                Type::void(ccx)
-            }
+        let llreturn_ty = if self.ret.is_ignore() {
+            Type::void(ccx)
+        } else if self.ret.is_indirect() {
+            llargument_tys.push(self.ret.memory_ty(ccx).ptr_to());
+            Type::void(ccx)
+        } else {
+            self.ret.cast.unwrap_or_else(|| {
+                type_of::immediate_type_of(ccx, self.ret.layout.ty)
+            })
         };
 
         for arg in &self.args {
+            if arg.is_ignore() {
+                continue;
+            }
             // add padding
             if let Some(ty) = arg.pad {
-                llargument_tys.push(ty.llvm_type(ccx));
+                llargument_tys.push(ty);
             }
 
-            let llarg_ty = match arg.mode {
-                PassMode::Ignore => continue,
-                PassMode::Direct(_) => arg.layout.immediate_llvm_type(ccx),
-                PassMode::Pair(..) => {
-                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 0));
-                    llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(ccx, 1));
-                    continue;
-                }
-                PassMode::Cast(cast) => cast.llvm_type(ccx),
-                PassMode::Indirect(_) => arg.memory_ty(ccx).ptr_to(),
+            let llarg_ty = if arg.is_indirect() {
+                arg.memory_ty(ccx).ptr_to()
+            } else {
+                arg.cast.unwrap_or_else(|| {
+                    type_of::immediate_type_of(ccx, arg.layout.ty)
+                })
             };
+
             llargument_tys.push(llarg_ty);
         }
 
@@ -980,61 +965,31 @@ impl<'a, 'tcx> FnType<'tcx> {
     }
 
     pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
-        let mut i = 0;
-        let mut apply = |attrs: &ArgAttributes| {
-            attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
-            i += 1;
-        };
-        match self.ret.mode {
-            PassMode::Direct(ref attrs) => {
-                attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
-            }
-            PassMode::Indirect(ref attrs) => apply(attrs),
-            _ => {}
+        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
+        if !self.ret.is_ignore() {
+            self.ret.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
         }
+        i += 1;
         for arg in &self.args {
-            if arg.pad.is_some() {
-                apply(&ArgAttributes::new());
-            }
-            match arg.mode {
-                PassMode::Ignore => {}
-                PassMode::Direct(ref attrs) |
-                PassMode::Indirect(ref attrs) => apply(attrs),
-                PassMode::Pair(ref a, ref b) => {
-                    apply(a);
-                    apply(b);
-                }
-                PassMode::Cast(_) => apply(&ArgAttributes::new()),
+            if !arg.is_ignore() {
+                if arg.pad.is_some() { i += 1; }
+                arg.attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn);
+                i += 1;
             }
         }
     }
 
     pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
-        let mut i = 0;
-        let mut apply = |attrs: &ArgAttributes| {
-            attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
-            i += 1;
-        };
-        match self.ret.mode {
-            PassMode::Direct(ref attrs) => {
-                attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite);
-            }
-            PassMode::Indirect(ref attrs) => apply(attrs),
-            _ => {}
+        let mut i = if self.ret.is_indirect() { 1 } else { 0 };
+        if !self.ret.is_ignore() {
+            self.ret.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
         }
+        i += 1;
         for arg in &self.args {
-            if arg.pad.is_some() {
-                apply(&ArgAttributes::new());
-            }
-            match arg.mode {
-                PassMode::Ignore => {}
-                PassMode::Direct(ref attrs) |
-                PassMode::Indirect(ref attrs) => apply(attrs),
-                PassMode::Pair(ref a, ref b) => {
-                    apply(a);
-                    apply(b);
-                }
-                PassMode::Cast(_) => apply(&ArgAttributes::new()),
+            if !arg.is_ignore() {
+                if arg.pad.is_some() { i += 1; }
+                arg.attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite);
+                i += 1;
             }
         }
 
@@ -1043,3 +998,7 @@ impl<'a, 'tcx> FnType<'tcx> {
         }
     }
 }
+
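+/// Rounds `off` up to the next multiple of `a` (assumes `a > 0`),
+/// e.g. align_up_to(5, 4) == 8 and align_up_to(8, 4) == 8.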
+pub fn align_up_to(off: u64, a: u64) -> u64 {
+    (off + a - 1) / a * a
+}
diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs
new file mode 100644
index 00000000000..b06f8e4e671
--- /dev/null
+++ b/src/librustc_trans/adt.rs
@@ -0,0 +1,497 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! # Representation of Algebraic Data Types
+//!
+//! This module determines how to represent enums, structs, and tuples
+//! based on their monomorphized types; it is responsible both for
+//! choosing a representation and translating basic operations on
+//! values of those types.  (Note: exporting the representations for
+//! debuggers is handled in debuginfo.rs, not here.)
+//!
+//! Note that the interface treats everything as a general case of an
+//! enum, so structs/tuples/etc. have one pseudo-variant with
+//! discriminant 0; i.e., as if they were a univariant enum.
+//!
+//! Having everything in one place will enable improvements to data
+//! structure representation; possibilities include:
+//!
+//! - User-specified alignment (e.g., cacheline-aligning parts of
+//!   concurrently accessed data structures); LLVM can't represent this
+//!   directly, so we'd have to insert padding fields in any structure
+//!   that might contain one and adjust GEP indices accordingly.  See
+//!   issue #4578.
+//!
+//! - Store nested enums' discriminants in the same word.  Rather, if
+//!   some variants start with enums, and those enums representations
+//!   have unused alignment padding between discriminant and body, the
+//!   outer enum's discriminant can be stored there and those variants
+//!   can start at offset 0.  Kind of fancy, and might need work to
+//!   make copies of the inner enum type cooperate, but it could help
+//!   with `Option` or `Result` wrapped around another enum.
+//!
+//! - Tagged pointers would be neat, but given that any type can be
+//!   used unboxed and any field can have pointers (including mutable)
+//!   taken to it, implementing them for Rust seems difficult.
+
+use std;
+
+use llvm::{ValueRef, True, IntEQ, IntNE};
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, LayoutTyper};
+use common::*;
+use builder::Builder;
+use base;
+use machine;
+use monomorphize;
+use type_::Type;
+use type_of;
+
+use mir::lvalue::Alignment;
+
+/// Given an enum, struct, closure, or tuple, extracts fields.
+/// Treats closures as a struct with one variant.
+/// `empty_if_no_variants` is a switch to deal with empty enums.
+/// If true, `variant_index` is disregarded and an empty Vec is returned in this case.
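+/// For example, for `Option<u32>` with `variant_index == 1` this returns the
+/// `Some` payload types, i.e. `[u32]`.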
+pub fn compute_fields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
+                                variant_index: usize,
+                                empty_if_no_variants: bool) -> Vec<Ty<'tcx>> {
+    match t.sty {
+        ty::TyAdt(ref def, _) if def.variants.len() == 0 && empty_if_no_variants => {
+            Vec::default()
+        },
+        ty::TyAdt(ref def, ref substs) => {
+            def.variants[variant_index].fields.iter().map(|f| {
+                monomorphize::field_ty(cx.tcx(), substs, f)
+            }).collect::<Vec<_>>()
+        },
+        ty::TyTuple(fields, _) => fields.to_vec(),
+        ty::TyClosure(def_id, substs) => {
+            if variant_index > 0 { bug!("{} is a closure, which only has one variant", t);}
+            substs.upvar_tys(def_id, cx.tcx()).collect()
+        },
+        ty::TyGenerator(def_id, substs, _) => {
+            if variant_index > 0 { bug!("{} is a generator, which only has one variant", t);}
+            substs.field_tys(def_id, cx.tcx()).map(|t| {
+                cx.tcx().fully_normalize_associated_types_in(&t)
+            }).collect()
+        },
+        _ => bug!("{} is not a type that can have fields.", t)
+    }
+}
+
+/// LLVM-level types are a little complicated.
+///
+/// C-like enums need to be actual ints, not wrapped in a struct,
+/// because that changes the ABI on some platforms (see issue #10308).
+///
+/// For nominal types, in some cases, we need to use LLVM named structs
+/// and fill in the actual contents in a second pass to prevent
+/// unbounded recursion; see also the comments in `trans::type_of`.
+pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
+    generic_type_of(cx, t, None)
+}
+
+pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                    t: Ty<'tcx>, name: &str) -> Type {
+    generic_type_of(cx, t, Some(name))
+}
+
+pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                t: Ty<'tcx>, llty: &mut Type) {
+    let l = cx.layout_of(t);
+    debug!("finish_type_of: {} with layout {:#?}", t, l);
+    match *l {
+        layout::CEnum { .. } | layout::General { .. }
+        | layout::UntaggedUnion { .. } | layout::RawNullablePointer { .. } => { }
+        layout::Univariant { .. }
+        | layout::StructWrappedNullablePointer { .. } => {
+            let (nonnull_variant_index, nonnull_variant, packed) = match *l {
+                layout::Univariant { ref variant, .. } => (0, variant, variant.packed),
+                layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } =>
+                    (nndiscr, nonnull, nonnull.packed),
+                _ => unreachable!()
+            };
+            let fields = compute_fields(cx, t, nonnull_variant_index as usize, true);
+            llty.set_struct_body(&struct_llfields(cx, &fields, nonnull_variant),
+                                 packed)
+        },
+        _ => bug!("This function cannot handle {} with layout {:#?}", t, l)
+    }
+}
+
+fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                             t: Ty<'tcx>,
+                             name: Option<&str>) -> Type {
+    let l = cx.layout_of(t);
+    debug!("adt::generic_type_of t: {:?} name: {:?}", t, name);
+    match *l {
+        layout::CEnum { discr, .. } => Type::from_integer(cx, discr),
+        layout::RawNullablePointer { nndiscr, .. } => {
+            let (def, substs) = match t.sty {
+                ty::TyAdt(d, s) => (d, s),
+                _ => bug!("{} is not an ADT", t)
+            };
+            let nnty = monomorphize::field_ty(cx.tcx(), substs,
+                &def.variants[nndiscr as usize].fields[0]);
+            if let layout::Scalar { value: layout::Pointer, .. } = *cx.layout_of(nnty) {
+                Type::i8p(cx)
+            } else {
+                type_of::type_of(cx, nnty)
+            }
+        }
+        layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
+            let fields = compute_fields(cx, t, nndiscr as usize, false);
+            match name {
+                None => {
+                    Type::struct_(cx, &struct_llfields(cx, &fields, nonnull),
+                                  nonnull.packed)
+                }
+                Some(name) => {
+                    Type::named_struct(cx, name)
+                }
+            }
+        }
+        layout::Univariant { ref variant, .. } => {
+            // Note that this case also handles empty enums,
+            // hence the `true` passed as the final parameter here.
+            let fields = compute_fields(cx, t, 0, true);
+            match name {
+                None => {
+                    let fields = struct_llfields(cx, &fields, &variant);
+                    Type::struct_(cx, &fields, variant.packed)
+                }
+                Some(name) => {
+                    // Hypothesis: named structs can never need a
+                    // drop flag. (... needs validation.)
+                    Type::named_struct(cx, name)
+                }
+            }
+        }
+        layout::UntaggedUnion { ref variants, .. } => {
+            // Use alignment-sized ints to fill all the union storage.
+            let size = variants.stride().bytes();
+            let align = variants.align.abi();
+            let fill = union_fill(cx, size, align);
+            match name {
+                None => {
+                    Type::struct_(cx, &[fill], variants.packed)
+                }
+                Some(name) => {
+                    let mut llty = Type::named_struct(cx, name);
+                    llty.set_struct_body(&[fill], variants.packed);
+                    llty
+                }
+            }
+        }
+        layout::General { discr, size, align, primitive_align, .. } => {
+            // We need a representation that has:
+            // * The alignment of the most-aligned field
+            // * The size of the largest variant (rounded up to that alignment)
+            // * No alignment padding anywhere any variant has actual data
+            //   (currently matters only for enums small enough to be immediate)
+            // * The discriminant in an obvious place.
+            //
+            // So we start with the discriminant, pad it up to the alignment with
+            // more of its own type, then use alignment-sized ints to get the rest
+            // of the size.
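+            //
+            // For example (illustrative): with an `i8` discriminant and a
+            // 4-byte-aligned payload, the struct is shaped like
+            // { i8, [3 x i8], <variant fill> }.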
+            let size = size.bytes();
+            let align = align.abi();
+            let primitive_align = primitive_align.abi();
+            assert!(align <= std::u32::MAX as u64);
+            let discr_ty = Type::from_integer(cx, discr);
+            let discr_size = discr.size().bytes();
+            let padded_discr_size = roundup(discr_size, align as u32);
+            let variant_part_size = size - padded_discr_size;
+            let variant_fill = union_fill(cx, variant_part_size, primitive_align);
+
+            assert_eq!(machine::llalign_of_min(cx, variant_fill), primitive_align as u32);
+            assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly
+            let fields: Vec<Type> =
+                [discr_ty,
+                 Type::array(&discr_ty, (padded_discr_size - discr_size) / discr_size),
+                 variant_fill].iter().cloned().collect();
+            match name {
+                None => {
+                    Type::struct_(cx, &fields, false)
+                }
+                Some(name) => {
+                    let mut llty = Type::named_struct(cx, name);
+                    llty.set_struct_body(&fields, false);
+                    llty
+                }
+            }
+        }
+        _ => bug!("Unsupported type {} represented as {:#?}", t, l)
+    }
+}
+
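Editor's note: the `General` case above stores the tag first, pads it out to the enum's alignment with more copies of the tag type, and fills the remainder with alignment-sized units. A minimal standalone sketch of that arithmetic (not part of the patch; `general_layout_parts` is a hypothetical helper):

    // Sketch of the General-layout size arithmetic: pad the discriminant
    // out to the enum's alignment, then fill the rest of the size.
    fn roundup(x: u64, a: u64) -> u64 {
        ((x + a - 1) / a) * a
    }

    fn general_layout_parts(size: u64, align: u64, discr_size: u64) -> (u64, u64, u64) {
        let padded_discr = roundup(discr_size, align);
        // number of extra discriminant-sized units used as tag padding
        let pad_units = (padded_discr - discr_size) / discr_size;
        let variant_part = size - padded_discr;
        (padded_discr, pad_units, variant_part)
    }

    fn main() {
        // a u8 tag in a 16-byte, 8-aligned enum: 7 bytes of tag padding,
        // then 8 bytes of variant storage
        assert_eq!(general_layout_parts(16, 8, 1), (8, 7, 8));
    }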
+fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type {
+    assert_eq!(size % align, 0);
+    assert_eq!(align.count_ones(), 1, "Alignment must be a power of 2. Got {}", align);
+    let align_units = size / align;
+    let layout_align = layout::Align::from_bytes(align, align).unwrap();
+    if let Some(ity) = layout::Integer::for_abi_align(cx, layout_align) {
+        Type::array(&Type::from_integer(cx, ity), align_units)
+    } else {
+        Type::array(&Type::vector(&Type::i32(cx), align/4),
+                    align_units)
+    }
+}
+
+
+// Double index to account for padding (FieldPath already uses `Struct::memory_index`)
+fn struct_llfields_path(discrfield: &layout::FieldPath) -> Vec<usize> {
+    discrfield.iter().map(|&i| (i as usize) << 1).collect::<Vec<_>>()
+}
+
+
+// Lookup `Struct::memory_index` and double it to account for padding
+pub fn struct_llfields_index(variant: &layout::Struct, index: usize) -> usize {
+    (variant.memory_index[index] as usize) << 1
+}
+
+
+pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, field_tys: &Vec<Ty<'tcx>>,
+                             variant: &layout::Struct) -> Vec<Type> {
+    debug!("struct_llfields: variant: {:?}", variant);
+    let mut first_field = true;
+    let mut min_offset = 0;
+    let mut result: Vec<Type> = Vec::with_capacity(field_tys.len() * 2);
+    let field_iter = variant.field_index_by_increasing_offset().map(|i| {
+        (i, field_tys[i as usize], variant.offsets[i as usize].bytes()) });
+    for (index, ty, target_offset) in field_iter {
+        if first_field {
+            debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}",
+                index, ty, min_offset, target_offset);
+            first_field = false;
+        } else {
+            assert!(target_offset >= min_offset);
+            let padding_bytes = if variant.packed { 0 } else { target_offset - min_offset };
+            result.push(Type::array(&Type::i8(cx), padding_bytes));
+            debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}",
+                index, ty, padding_bytes, min_offset, target_offset);
+        }
+        let llty = type_of::in_memory_type_of(cx, ty);
+        result.push(llty);
+        let layout = cx.layout_of(ty);
+        let target_size = layout.size(&cx.tcx().data_layout).bytes();
+        min_offset = target_offset + target_size;
+    }
+    if variant.sized && !field_tys.is_empty() {
+        if variant.stride().bytes() < min_offset {
+            bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(),
+            min_offset);
+        }
+        let padding_bytes = variant.stride().bytes() - min_offset;
+        debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n",
+               padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes());
+        result.push(Type::array(&Type::i8(cx), padding_bytes));
+        assert!(result.len() == (field_tys.len() * 2));
+    } else {
+        debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n",
+               min_offset, variant.min_size.bytes(), variant.stride().bytes());
+    }
+
+    result
+}
+
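Editor's note: `struct_llfields` interleaves every field with an explicit `[i8; N]` padding slot, including a trailing one up to the stride, which is why the two helpers above double `memory_index` when indexing the resulting LLVM struct. A standalone model of that interleaving, assuming offsets arrive in increasing order (`llfield_sizes` is a hypothetical name):

    // Returns the sizes of the LLVM struct fields produced for a variant:
    // [field0, pad, field1, pad, ..., fieldN, trailing pad], so source
    // field i lands at LLVM index 2*i.
    fn llfield_sizes(offsets: &[u64], sizes: &[u64], stride: u64) -> Vec<u64> {
        let mut out = Vec::new();
        let mut min_offset = 0;
        for (i, (&off, &sz)) in offsets.iter().zip(sizes).enumerate() {
            if i > 0 {
                out.push(off - min_offset); // padding in front of this field
            }
            out.push(sz);
            min_offset = off + sz;
        }
        out.push(stride - min_offset); // trailing padding up to the stride
        out
    }

    fn main() {
        // two u32 fields at offsets 0 and 4 in an 8-byte struct need no
        // padding, but still get zero-length padding slots
        assert_eq!(llfield_sizes(&[0, 4], &[4, 4], 8), vec![4, 0, 4, 0]);
    }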
+pub fn is_discr_signed<'tcx>(l: &layout::Layout) -> bool {
+    match *l {
+        layout::CEnum { signed, .. } => signed,
+        _ => false,
+    }
+}
+
+/// Obtain the actual discriminant of a value.
+pub fn trans_get_discr<'a, 'tcx>(
+    bcx: &Builder<'a, 'tcx>,
+    t: Ty<'tcx>,
+    scrutinee: ValueRef,
+    alignment: Alignment,
+    cast_to: Option<Type>,
+    range_assert: bool
+) -> ValueRef {
+    debug!("trans_get_discr t: {:?}", t);
+    let l = bcx.ccx.layout_of(t);
+
+    let val = match *l {
+        layout::CEnum { discr, min, max, .. } => {
+            load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert)
+        }
+        layout::General { discr, ref variants, .. } => {
+            let ptr = bcx.struct_gep(scrutinee, 0);
+            load_discr(bcx, discr, ptr, alignment,
+                       0, variants.len() as u64 - 1,
+                       range_assert)
+        }
+        layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
+        layout::RawNullablePointer { nndiscr, .. } => {
+            let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
+            let discr = bcx.load(scrutinee, alignment.to_align());
+            bcx.icmp(cmp, discr, C_null(val_ty(discr)))
+        }
+        layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
+            struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment)
+        },
+        _ => bug!("{} is not an enum", t)
+    };
+    match cast_to {
+        None => val,
+        Some(llty) => bcx.intcast(val, llty, is_discr_signed(&l))
+    }
+}
+
+fn struct_wrapped_nullable_bitdiscr(
+    bcx: &Builder,
+    nndiscr: u64,
+    discrfield: &layout::FieldPath,
+    scrutinee: ValueRef,
+    alignment: Alignment,
+) -> ValueRef {
+    let path = struct_llfields_path(discrfield);
+    let llptrptr = bcx.gepi(scrutinee, &path);
+    let llptr = bcx.load(llptrptr, alignment.to_align());
+    let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
+    bcx.icmp(cmp, llptr, C_null(val_ty(llptr)))
+}
+
+/// Helper for cases where the discriminant is simply loaded.
+fn load_discr(bcx: &Builder, ity: layout::Integer, ptr: ValueRef,
+              alignment: Alignment, min: u64, max: u64,
+              range_assert: bool)
+    -> ValueRef {
+    let llty = Type::from_integer(bcx.ccx, ity);
+    assert_eq!(val_ty(ptr), llty.ptr_to());
+    let bits = ity.size().bits();
+    assert!(bits <= 64);
+    let bits = bits as usize;
+    let mask = !0u64 >> (64 - bits);
+    // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
+    // However, that is fine here (it would still represent the full range),
+    if max.wrapping_add(1) & mask == min & mask || !range_assert {
+        // i.e., if the range is everything.  The lo==hi case would be
+        // rejected by the LLVM verifier (it would mean either an
+        // empty set, which is impossible, or the entire range of the
+        // type, which is pointless).
+        bcx.load(ptr, alignment.to_align())
+    } else {
+        // llvm::ConstantRange can deal with ranges that wrap around,
+        // so an overflow on (max + 1) is fine.
+        bcx.load_range_assert(ptr, min, max.wrapping_add(1), /* signed: */ True,
+                              alignment.to_align())
+    }
+}
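Editor's note: the wrap-around test in `load_discr` decides whether the range metadata would cover every value of the integer type, in which case LLVM would reject it. A minimal reproduction of just that predicate (`covers_whole_range` is a hypothetical name):

    // True when [min, max+1) spans every value of a `bits`-wide integer,
    // i.e. when max + 1 wraps back onto min modulo 2^bits.
    fn covers_whole_range(min: u64, max: u64, bits: u32) -> bool {
        let mask = !0u64 >> (64 - bits);
        max.wrapping_add(1) & mask == min & mask
    }

    fn main() {
        assert!(covers_whole_range(0, 255, 8));  // full u8 range: skip metadata
        assert!(!covers_whole_range(0, 2, 8));   // three-variant tag: assertable
    }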
+
+/// Set the discriminant for a new value of the given case of the given
+/// representation.
+pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: ValueRef, to: u64) {
+    let l = bcx.ccx.layout_of(t);
+    match *l {
+        layout::CEnum{ discr, min, max, .. } => {
+            assert_discr_in_range(min, max, to);
+            bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
+                  val, None);
+        }
+        layout::General{ discr, .. } => {
+            bcx.store(C_int(Type::from_integer(bcx.ccx, discr), to as i64),
+                  bcx.struct_gep(val, 0), None);
+        }
+        layout::Univariant { .. }
+        | layout::UntaggedUnion { .. }
+        | layout::Vector { .. } => {
+            assert_eq!(to, 0);
+        }
+        layout::RawNullablePointer { nndiscr, .. } => {
+            if to != nndiscr {
+                let llptrty = val_ty(val).element_type();
+                bcx.store(C_null(llptrty), val, None);
+            }
+        }
+        layout::StructWrappedNullablePointer { nndiscr, ref discrfield, ref nonnull, .. } => {
+            if to != nndiscr {
+                if target_sets_discr_via_memset(bcx) {
+                    // Issue #34427: As workaround for LLVM bug on
+                    // ARM, use memset of 0 on whole struct rather
+                    // than storing null to single target field.
+                    let llptr = bcx.pointercast(val, Type::i8(bcx.ccx).ptr_to());
+                    let fill_byte = C_u8(bcx.ccx, 0);
+                    let size = C_usize(bcx.ccx, nonnull.stride().bytes());
+                    let align = C_i32(bcx.ccx, nonnull.align.abi() as i32);
+                    base::call_memset(bcx, llptr, fill_byte, size, align, false);
+                } else {
+                    let path = struct_llfields_path(discrfield);
+                    let llptrptr = bcx.gepi(val, &path);
+                    let llptrty = val_ty(llptrptr).element_type();
+                    bcx.store(C_null(llptrty), llptrptr, None);
+                }
+            }
+        }
+        _ => bug!("Cannot handle {} represented as {:#?}", t, l)
+    }
+}
+
+fn target_sets_discr_via_memset<'a, 'tcx>(bcx: &Builder<'a, 'tcx>) -> bool {
+    bcx.sess().target.target.arch == "arm" || bcx.sess().target.target.arch == "aarch64"
+}
+
+pub fn assert_discr_in_range<D: PartialOrd>(min: D, max: D, discr: D) {
+    if min <= max {
+        assert!(min <= discr && discr <= max)
+    } else {
+        assert!(min <= discr || discr <= max)
+    }
+}
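Editor's note: `assert_discr_in_range` accepts wrapped ranges, where `min > max` means the valid values wrap around the integer type. A standalone illustration, using u8 for brevity:

    // A wrapped range such as 254..=1 on a u8 accepts 254, 255, 0 and 1.
    fn in_range(min: u8, max: u8, d: u8) -> bool {
        if min <= max {
            min <= d && d <= max
        } else {
            min <= d || d <= max
        }
    }

    fn main() {
        assert!(in_range(254, 1, 255));
        assert!(in_range(254, 1, 0));
        assert!(!in_range(254, 1, 7));
    }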
+
+// FIXME this utility routine should be somewhere more general
+#[inline]
+fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
+
+/// Extract a field of a constant value, as appropriate for its
+/// representation.
+///
+/// (Not to be confused with `common::const_get_elt`, which operates on
+/// raw LLVM-level structs and arrays.)
+pub fn const_get_field<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>,
+                       val: ValueRef,
+                       ix: usize) -> ValueRef {
+    let l = ccx.layout_of(t);
+    match *l {
+        layout::CEnum { .. } => bug!("element access in C-like enum const"),
+        layout::Univariant { ref variant, .. } => {
+            const_struct_field(val, variant.memory_index[ix] as usize)
+        }
+        layout::Vector { .. } => const_struct_field(val, ix),
+        layout::UntaggedUnion { .. } => const_struct_field(val, 0),
+        _ => bug!("{} does not have fields.", t)
+    }
+}
+
+/// Extract field of struct-like const, skipping our alignment padding.
+fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef {
+    // Get the ix-th non-undef element of the struct.
+    let mut real_ix = 0; // actual position in the struct
+    let mut ix = ix; // logical index relative to real_ix
+    let mut field;
+    loop {
+        loop {
+            field = const_get_elt(val, &[real_ix]);
+            if !is_undef(field) {
+                break;
+            }
+            real_ix = real_ix + 1;
+        }
+        if ix == 0 {
+            return field;
+        }
+        ix = ix - 1;
+        real_ix = real_ix + 1;
+    }
+}
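Editor's note: `const_struct_field` translates a logical field index into a position in the padded constant by skipping `undef` padding elements. A standalone model of that loop, with `None` standing in for an undef padding slot (`skip_undef` is a hypothetical name):

    fn skip_undef(elems: &[Option<u32>], ix: usize) -> u32 {
        let mut remaining = ix;
        for e in elems {
            if let Some(v) = *e {
                if remaining == 0 {
                    return v; // found the ix-th non-undef element
                }
                remaining -= 1;
            }
        }
        panic!("field index out of range");
    }

    fn main() {
        // [field0, pad, field1, pad], as laid out by struct_llfields
        let elems = [Some(10), None, Some(20), None];
        assert_eq!(skip_undef(&elems, 1), 20);
    }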
diff --git a/src/librustc_trans/asm.rs b/src/librustc_trans/asm.rs
index 1959fd13ccb..92cbd004206 100644
--- a/src/librustc_trans/asm.rs
+++ b/src/librustc_trans/asm.rs
@@ -11,15 +11,16 @@
 //! # Translation of inline assembly.
 
 use llvm::{self, ValueRef};
+use base;
 use common::*;
+use type_of;
 use type_::Type;
-use type_of::LayoutLlvmExt;
 use builder::Builder;
 
 use rustc::hir;
+use rustc::ty::Ty;
 
-use mir::lvalue::LvalueRef;
-use mir::operand::OperandValue;
+use mir::lvalue::Alignment;
 
 use std::ffi::CString;
 use syntax::ast::AsmDialect;
@@ -29,7 +30,7 @@ use libc::{c_uint, c_char};
 pub fn trans_inline_asm<'a, 'tcx>(
     bcx: &Builder<'a, 'tcx>,
     ia: &hir::InlineAsm,
-    outputs: Vec<LvalueRef<'tcx>>,
+    outputs: Vec<(ValueRef, Ty<'tcx>)>,
     mut inputs: Vec<ValueRef>
 ) {
     let mut ext_constraints = vec![];
@@ -37,15 +38,20 @@ pub fn trans_inline_asm<'a, 'tcx>(
 
     // Prepare the output operands
     let mut indirect_outputs = vec![];
-    for (i, (out, lvalue)) in ia.outputs.iter().zip(&outputs).enumerate() {
+    for (i, (out, &(val, ty))) in ia.outputs.iter().zip(&outputs).enumerate() {
+        let val = if out.is_rw || out.is_indirect {
+            Some(base::load_ty(bcx, val, Alignment::Packed, ty))
+        } else {
+            None
+        };
         if out.is_rw {
-            inputs.push(lvalue.load(bcx).immediate());
+            inputs.push(val.unwrap());
             ext_constraints.push(i.to_string());
         }
         if out.is_indirect {
-            indirect_outputs.push(lvalue.load(bcx).immediate());
+            indirect_outputs.push(val.unwrap());
         } else {
-            output_types.push(lvalue.layout.llvm_type(bcx.ccx));
+            output_types.push(type_of::type_of(bcx.ccx, ty));
         }
     }
     if !indirect_outputs.is_empty() {
@@ -100,9 +106,9 @@ pub fn trans_inline_asm<'a, 'tcx>(
 
     // Again, based on how many outputs we have
     let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
-    for (i, (_, &lvalue)) in outputs.enumerate() {
-        let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i as u64) };
-        OperandValue::Immediate(v).store(bcx, lvalue);
+    for (i, (_, &(val, _))) in outputs.enumerate() {
+        let v = if num_outputs == 1 { r } else { bcx.extract_value(r, i) };
+        bcx.store(v, val, None);
     }
 
     // Store mark in a metadata node so we can map LLVM errors
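Editor's note: the rewritten output loop distinguishes three operand roles: read-write outputs are re-pushed as inputs with a tied constraint (the output's index as a string), indirect outputs are passed through memory, and the rest come back as SSA results. A sketch of just that bookkeeping, with the LLVM details stripped out (`Out` and `classify` are hypothetical):

    struct Out { is_rw: bool, is_indirect: bool }

    // Returns (tied input constraints, indirect count, direct result count).
    fn classify(outs: &[Out]) -> (Vec<String>, usize, usize) {
        let mut tied = Vec::new();
        let (mut indirect, mut direct) = (0, 0);
        for (i, o) in outs.iter().enumerate() {
            if o.is_rw {
                tied.push(i.to_string()); // "0", "1", ...: tie input to output i
            }
            if o.is_indirect { indirect += 1; } else { direct += 1; }
        }
        (tied, indirect, direct)
    }

    fn main() {
        let outs = [Out { is_rw: true, is_indirect: false },
                    Out { is_rw: false, is_indirect: true }];
        assert_eq!(classify(&outs), (vec!["0".to_string()], 1, 1));
    }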
diff --git a/src/librustc_trans/attributes.rs b/src/librustc_trans/attributes.rs
index 745aa0da829..b6ca1460a7d 100644
--- a/src/librustc_trans/attributes.rs
+++ b/src/librustc_trans/attributes.rs
@@ -116,7 +116,7 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe
             naked(llfn, true);
         } else if attr.check_name("allocator") {
             Attribute::NoAlias.apply_llfn(
-                llvm::AttributePlace::ReturnValue, llfn);
+                llvm::AttributePlace::ReturnValue(), llfn);
         } else if attr.check_name("unwind") {
             unwind(llfn, true);
         } else if attr.check_name("rustc_allocator_nounwind") {
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index b7408681ed0..69bcd0aa50b 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -28,7 +28,6 @@ use super::ModuleSource;
 use super::ModuleTranslation;
 use super::ModuleKind;
 
-use abi;
 use assert_module_sources;
 use back::link;
 use back::symbol_export;
@@ -41,7 +40,6 @@ use rustc::middle::lang_items::StartFnLangItem;
 use rustc::middle::trans::{Linkage, Visibility, Stats};
 use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes};
 use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
 use rustc::ty::maps::Providers;
 use rustc::dep_graph::{DepNode, DepKind, DepConstructor};
 use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
@@ -49,6 +47,7 @@ use rustc::util::common::{time, print_time_passes_entry};
 use rustc::session::config::{self, NoDebugInfo};
 use rustc::session::Session;
 use rustc_incremental;
+use abi;
 use allocator;
 use mir::lvalue::LvalueRef;
 use attributes;
@@ -56,20 +55,25 @@ use builder::Builder;
 use callee;
 use common::{C_bool, C_bytes_in_context, C_i32, C_usize};
 use collector::{self, TransItemCollectionMode};
-use common::{self, C_struct_in_context, C_array, CrateContext, val_ty};
+use common::{C_struct_in_context, C_u64, C_undef, C_array};
+use common::CrateContext;
+use common::{type_is_zero_size, val_ty};
+use common;
 use consts;
 use context::{self, LocalCrateContext, SharedCrateContext};
 use debuginfo;
 use declare;
+use machine;
 use meth;
 use mir;
-use monomorphize::Instance;
+use monomorphize::{self, Instance};
 use partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt};
 use symbol_names_test;
 use time_graph;
 use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames};
 use type_::Type;
-use type_of::LayoutLlvmExt;
+use type_of;
+use value::Value;
 use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet};
 use CrateInfo;
 
@@ -86,7 +90,7 @@ use syntax::attr;
 use rustc::hir;
 use syntax::ast;
 
-use mir::operand::OperandValue;
+use mir::lvalue::Alignment;
 
 pub use rustc_trans_utils::{find_exported_symbols, check_for_rustc_errors_attr};
 pub use rustc_trans_utils::trans_item::linkage_by_name;
@@ -121,6 +125,14 @@ impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
     }
 }
 
+pub fn get_meta(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef {
+    bcx.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
+}
+
+pub fn get_dataptr(bcx: &Builder, fat_ptr: ValueRef) -> ValueRef {
+    bcx.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
+}
+
 pub fn bin_op_to_icmp_predicate(op: hir::BinOp_,
                                 signed: bool)
                                 -> llvm::IntPredicate {
@@ -204,10 +216,8 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
             old_info.expect("unsized_info: missing old info for trait upcast")
         }
         (_, &ty::TyDynamic(ref data, ..)) => {
-            let vtable_ptr = ccx.layout_of(ccx.tcx().mk_mut_ptr(target))
-                .field(ccx, abi::FAT_PTR_EXTRA);
             consts::ptrcast(meth::get_vtable(ccx, source, data.principal()),
-                            vtable_ptr.llvm_type(ccx))
+                            Type::vtable_ptr(ccx))
         }
         _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
                                      source,
@@ -231,40 +241,15 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
         (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
          &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
             assert!(bcx.ccx.shared().type_is_sized(a));
-            let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to();
+            let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to();
             (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None))
         }
         (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
             let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
             assert!(bcx.ccx.shared().type_is_sized(a));
-            let ptr_ty = bcx.ccx.layout_of(b).llvm_type(bcx.ccx).ptr_to();
+            let ptr_ty = type_of::in_memory_type_of(bcx.ccx, b).ptr_to();
             (bcx.pointercast(src, ptr_ty), unsized_info(bcx.ccx, a, b, None))
         }
-        (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
-            assert_eq!(def_a, def_b);
-
-            let src_layout = bcx.ccx.layout_of(src_ty);
-            let dst_layout = bcx.ccx.layout_of(dst_ty);
-            let mut result = None;
-            for i in 0..src_layout.fields.count() {
-                let src_f = src_layout.field(bcx.ccx, i);
-                assert_eq!(src_layout.fields.offset(i).bytes(), 0);
-                assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
-                if src_f.is_zst() {
-                    continue;
-                }
-                assert_eq!(src_layout.size, src_f.size);
-
-                let dst_f = dst_layout.field(bcx.ccx, i);
-                assert_ne!(src_f.ty, dst_f.ty);
-                assert_eq!(result, None);
-                result = Some(unsize_thin_ptr(bcx, src, src_f.ty, dst_f.ty));
-            }
-            let (lldata, llextra) = result.unwrap();
-            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-            (bcx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 0)),
-             bcx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bcx.ccx, 1)))
-        }
         _ => bug!("unsize_thin_ptr: called on bad types"),
     }
 }
@@ -272,26 +257,25 @@ pub fn unsize_thin_ptr<'a, 'tcx>(
 /// Coerce `src`, which is a reference to a value of type `src_ty`,
 /// to a value of type `dst_ty` and store the result in `dst`
 pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
-                                     src: LvalueRef<'tcx>,
-                                     dst: LvalueRef<'tcx>) {
-    let src_ty = src.layout.ty;
-    let dst_ty = dst.layout.ty;
+                                     src: &LvalueRef<'tcx>,
+                                     dst: &LvalueRef<'tcx>) {
+    let src_ty = src.ty.to_ty(bcx.tcx());
+    let dst_ty = dst.ty.to_ty(bcx.tcx());
     let coerce_ptr = || {
-        let (base, info) = match src.load(bcx).val {
-            OperandValue::Pair(base, info) => {
-                // fat-ptr to fat-ptr unsize preserves the vtable
-                // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
-                // So we need to pointercast the base to ensure
-                // the types match up.
-                let thin_ptr = dst.layout.field(bcx.ccx, abi::FAT_PTR_ADDR);
-                (bcx.pointercast(base, thin_ptr.llvm_type(bcx.ccx)), info)
-            }
-            OperandValue::Immediate(base) => {
-                unsize_thin_ptr(bcx, base, src_ty, dst_ty)
-            }
-            OperandValue::Ref(..) => bug!()
+        let (base, info) = if common::type_is_fat_ptr(bcx.ccx, src_ty) {
+            // fat-ptr to fat-ptr unsize preserves the vtable
+            // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
+            // So we need to pointercast the base to ensure
+            // the types match up.
+            let (base, info) = load_fat_ptr(bcx, src.llval, src.alignment, src_ty);
+            let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty);
+            let base = bcx.pointercast(base, llcast_ty);
+            (base, info)
+        } else {
+            let base = load_ty(bcx, src.llval, src.alignment, src_ty);
+            unsize_thin_ptr(bcx, base, src_ty, dst_ty)
         };
-        OperandValue::Pair(base, info).store(bcx, dst);
+        store_fat_ptr(bcx, base, info, dst.llval, dst.alignment, dst_ty);
     };
     match (&src_ty.sty, &dst_ty.sty) {
         (&ty::TyRef(..), &ty::TyRef(..)) |
@@ -303,22 +287,32 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             coerce_ptr()
         }
 
-        (&ty::TyAdt(def_a, _), &ty::TyAdt(def_b, _)) => {
+        (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
             assert_eq!(def_a, def_b);
 
-            for i in 0..def_a.variants[0].fields.len() {
-                let src_f = src.project_field(bcx, i);
-                let dst_f = dst.project_field(bcx, i);
+            let src_fields = def_a.variants[0].fields.iter().map(|f| {
+                monomorphize::field_ty(bcx.tcx(), substs_a, f)
+            });
+            let dst_fields = def_b.variants[0].fields.iter().map(|f| {
+                monomorphize::field_ty(bcx.tcx(), substs_b, f)
+            });
 
-                if dst_f.layout.is_zst() {
+            let iter = src_fields.zip(dst_fields).enumerate();
+            for (i, (src_fty, dst_fty)) in iter {
+                if type_is_zero_size(bcx.ccx, dst_fty) {
                     continue;
                 }
 
-                if src_f.layout.ty == dst_f.layout.ty {
-                    memcpy_ty(bcx, dst_f.llval, src_f.llval, src_f.layout,
-                        (src_f.alignment | dst_f.alignment).non_abi());
+                let (src_f, src_f_align) = src.trans_field_ptr(bcx, i);
+                let (dst_f, dst_f_align) = dst.trans_field_ptr(bcx, i);
+                if src_fty == dst_fty {
+                    memcpy_ty(bcx, dst_f, src_f, src_fty, None);
                 } else {
-                    coerce_unsized_into(bcx, src_f, dst_f);
+                    coerce_unsized_into(
+                        bcx,
+                        &LvalueRef::new_sized_ty(src_f, src_fty, src_f_align),
+                        &LvalueRef::new_sized_ty(dst_f, dst_fty, dst_f_align)
+                    );
                 }
             }
         }
@@ -391,6 +385,94 @@ pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
     b.call(assume_intrinsic, &[val], None);
 }
 
+/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
+/// differs from the type used for SSA values. Also handles various special cases where the type
+/// gives us better information about what we are loading.
+pub fn load_ty<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef,
+                         alignment: Alignment, t: Ty<'tcx>) -> ValueRef {
+    let ccx = b.ccx;
+    if type_is_zero_size(ccx, t) {
+        return C_undef(type_of::type_of(ccx, t));
+    }
+
+    unsafe {
+        let global = llvm::LLVMIsAGlobalVariable(ptr);
+        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
+            let val = llvm::LLVMGetInitializer(global);
+            if !val.is_null() {
+                if t.is_bool() {
+                    return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref());
+                }
+                return val;
+            }
+        }
+    }
+
+    if t.is_bool() {
+        b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False, alignment.to_align()),
+                Type::i1(ccx))
+    } else if t.is_char() {
+        // a char is a Unicode codepoint, and so takes values from 0
+        // to 0x10FFFF inclusive only.
+        b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False, alignment.to_align())
+    } else if (t.is_region_ptr() || t.is_box() || t.is_fn())
+        && !common::type_is_fat_ptr(ccx, t)
+    {
+        b.load_nonnull(ptr, alignment.to_align())
+    } else {
+        b.load(ptr, alignment.to_align())
+    }
+}
+
+/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
+/// differs from the type used for SSA values.
+pub fn store_ty<'a, 'tcx>(cx: &Builder<'a, 'tcx>, v: ValueRef, dst: ValueRef,
+                          dst_align: Alignment, t: Ty<'tcx>) {
+    debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));
+
+    if common::type_is_fat_ptr(cx.ccx, t) {
+        let lladdr = cx.extract_value(v, abi::FAT_PTR_ADDR);
+        let llextra = cx.extract_value(v, abi::FAT_PTR_EXTRA);
+        store_fat_ptr(cx, lladdr, llextra, dst, dst_align, t);
+    } else {
+        cx.store(from_immediate(cx, v), dst, dst_align.to_align());
+    }
+}
+
+pub fn store_fat_ptr<'a, 'tcx>(cx: &Builder<'a, 'tcx>,
+                               data: ValueRef,
+                               extra: ValueRef,
+                               dst: ValueRef,
+                               dst_align: Alignment,
+                               _ty: Ty<'tcx>) {
+    // FIXME: emit metadata
+    cx.store(data, get_dataptr(cx, dst), dst_align.to_align());
+    cx.store(extra, get_meta(cx, dst), dst_align.to_align());
+}
+
+pub fn load_fat_ptr<'a, 'tcx>(
+    b: &Builder<'a, 'tcx>, src: ValueRef, alignment: Alignment, t: Ty<'tcx>
+) -> (ValueRef, ValueRef) {
+    let ptr = get_dataptr(b, src);
+    let ptr = if t.is_region_ptr() || t.is_box() {
+        b.load_nonnull(ptr, alignment.to_align())
+    } else {
+        b.load(ptr, alignment.to_align())
+    };
+
+    let meta = get_meta(b, src);
+    let meta_ty = val_ty(meta);
+    // If the 'meta' field is a pointer, it's a vtable, so use load_nonnull
+    // instead
+    let meta = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer {
+        b.load_nonnull(meta, None)
+    } else {
+        b.load(meta, None)
+    };
+
+    (ptr, meta)
+}
+
 pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
     if val_ty(val) == Type::i1(bcx.ccx) {
         bcx.zext(val, Type::i8(bcx.ccx))
@@ -399,20 +481,50 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
     }
 }
 
-pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
-    if let layout::Abi::Scalar(ref scalar) = layout.abi {
-        if scalar.is_bool() {
-            return bcx.trunc(val, Type::i1(bcx.ccx));
+pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef {
+    if ty.is_bool() {
+        bcx.trunc(val, Type::i1(bcx.ccx))
+    } else {
+        val
+    }
+}
+
+pub enum Lifetime { Start, End }
+
+impl Lifetime {
+    // If LLVM lifetime intrinsic support is enabled (i.e. optimizations
+    // on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
+    // and the intrinsic for `lt` and passes them to `emit`, which is in
+    // charge of generating code to call the passed intrinsic on whatever
+    // block of generated code is targeted for the intrinsic.
+    //
+    // If LLVM lifetime intrinsic support is disabled (i.e.  optimizations
+    // off) or `ptr` is zero-sized, then no-op (does not call `emit`).
+    pub fn call(self, b: &Builder, ptr: ValueRef) {
+        if b.ccx.sess().opts.optimize == config::OptLevel::No {
+            return;
         }
+
+        let size = machine::llsize_of_alloc(b.ccx, val_ty(ptr).element_type());
+        if size == 0 {
+            return;
+        }
+
+        let lifetime_intrinsic = b.ccx.get_intrinsic(match self {
+            Lifetime::Start => "llvm.lifetime.start",
+            Lifetime::End => "llvm.lifetime.end"
+        });
+
+        let ptr = b.pointercast(ptr, Type::i8p(b.ccx));
+        b.call(lifetime_intrinsic, &[C_u64(b.ccx, size), ptr], None);
     }
-    val
 }
 
-pub fn call_memcpy(b: &Builder,
-                   dst: ValueRef,
-                   src: ValueRef,
-                   n_bytes: ValueRef,
-                   align: Align) {
+pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>,
+                               dst: ValueRef,
+                               src: ValueRef,
+                               n_bytes: ValueRef,
+                               align: u32) {
     let ccx = b.ccx;
     let ptr_width = &ccx.sess().target.target.target_pointer_width;
     let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
@@ -420,7 +532,7 @@ pub fn call_memcpy(b: &Builder,
     let src_ptr = b.pointercast(src, Type::i8p(ccx));
     let dst_ptr = b.pointercast(dst, Type::i8p(ccx));
     let size = b.intcast(n_bytes, ccx.isize_ty(), false);
-    let align = C_i32(ccx, align.abi() as i32);
+    let align = C_i32(ccx, align as i32);
     let volatile = C_bool(ccx, false);
     b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
 }
@@ -429,16 +541,18 @@ pub fn memcpy_ty<'a, 'tcx>(
     bcx: &Builder<'a, 'tcx>,
     dst: ValueRef,
     src: ValueRef,
-    layout: TyLayout<'tcx>,
-    align: Option<Align>,
+    t: Ty<'tcx>,
+    align: Option<u32>,
 ) {
-    let size = layout.size.bytes();
+    let ccx = bcx.ccx;
+
+    let size = ccx.size_of(t);
     if size == 0 {
         return;
     }
 
-    let align = align.unwrap_or(layout.align);
-    call_memcpy(bcx, dst, src, C_usize(bcx.ccx, size), align);
+    let align = align.unwrap_or_else(|| ccx.align_of(t));
+    call_memcpy(bcx, dst, src, C_usize(ccx, size), align);
 }
 
 pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
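Editor's note: `get_dataptr`/`get_meta` and the fat-pointer helpers restored above all rely on one convention: a fat pointer is a two-field struct, with the data pointer at `abi::FAT_PTR_ADDR` and the length or vtable at `abi::FAT_PTR_EXTRA`. A minimal model of that convention (the index values 0 and 1 below mirror the abi module):

    const FAT_PTR_ADDR: usize = 0;
    const FAT_PTR_EXTRA: usize = 1;

    struct FatPtr {
        fields: [usize; 2], // stand-in for the two-member LLVM struct
    }

    fn store_fat_ptr(dst: &mut FatPtr, data: usize, extra: usize) {
        dst.fields[FAT_PTR_ADDR] = data;
        dst.fields[FAT_PTR_EXTRA] = extra;
    }

    fn load_fat_ptr(src: &FatPtr) -> (usize, usize) {
        (src.fields[FAT_PTR_ADDR], src.fields[FAT_PTR_EXTRA])
    }

    fn main() {
        let mut p = FatPtr { fields: [0, 0] };
        store_fat_ptr(&mut p, 0x1000, 16); // e.g. a &[u8] of length 16
        assert_eq!(load_fat_ptr(&p), (0x1000, 16));
    }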
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index 50e673bdbfd..b366d5579c3 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -15,16 +15,15 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
 use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
 use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
 use common::*;
+use machine::llalign_of_pref;
 use type_::Type;
 use value::Value;
 use libc::{c_uint, c_char};
 use rustc::ty::TyCtxt;
-use rustc::ty::layout::{Align, Size};
-use rustc::session::{config, Session};
+use rustc::session::Session;
 
 use std::borrow::Cow;
 use std::ffi::CString;
-use std::ops::Range;
 use std::ptr;
 use syntax_pos::Span;
 
@@ -488,7 +487,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    pub fn alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef {
+    pub fn alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
         let builder = Builder::with_ccx(self.ccx);
         builder.position_at_start(unsafe {
             llvm::LLVMGetFirstBasicBlock(self.llfn())
@@ -496,7 +495,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         builder.dynamic_alloca(ty, name, align)
     }
 
-    pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Align) -> ValueRef {
+    pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef {
         self.count_insn("alloca");
         unsafe {
             let alloca = if name.is_empty() {
@@ -506,7 +505,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 llvm::LLVMBuildAlloca(self.llbuilder, ty.to_ref(),
                                       name.as_ptr())
             };
-            llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
+            if let Some(align) = align {
+                llvm::LLVMSetAlignment(alloca, align as c_uint);
+            }
             alloca
         }
     }
@@ -518,12 +519,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    pub fn load(&self, ptr: ValueRef, align: Option<Align>) -> ValueRef {
+    pub fn load(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef {
         self.count_insn("load");
         unsafe {
             let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
             if let Some(align) = align {
-                llvm::LLVMSetAlignment(load, align.abi() as c_uint);
+                llvm::LLVMSetAlignment(load, align as c_uint);
             }
             load
         }
@@ -538,42 +539,49 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering, align: Align) -> ValueRef {
+    pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef {
         self.count_insn("load.atomic");
         unsafe {
-            let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order);
-            // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here?
-            // However, 64-bit atomic loads on `i686-apple-darwin` appear to
-            // require `___atomic_load` with ABI-alignment, so it's staying.
-            llvm::LLVMSetAlignment(load, align.pref() as c_uint);
-            load
+            let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
+            let align = llalign_of_pref(self.ccx, ty.element_type());
+            llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order,
+                                          align as c_uint)
         }
     }
 
 
-    pub fn range_metadata(&self, load: ValueRef, range: Range<u128>) {
+    pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
+                             hi: u64, signed: llvm::Bool,
+                             align: Option<u32>) -> ValueRef {
+        let value = self.load(ptr, align);
+
         unsafe {
-            let llty = val_ty(load);
-            let v = [
-                C_uint_big(llty, range.start),
-                C_uint_big(llty, range.end)
-            ];
+            let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr));
+            let min = llvm::LLVMConstInt(t, lo, signed);
+            let max = llvm::LLVMConstInt(t, hi, signed);
+
+            let v = [min, max];
 
-            llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
+            llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
                                   llvm::LLVMMDNodeInContext(self.ccx.llcx(),
                                                             v.as_ptr(),
                                                             v.len() as c_uint));
         }
+
+        value
     }
 
-    pub fn nonnull_metadata(&self, load: ValueRef) {
+    pub fn load_nonnull(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef {
+        let value = self.load(ptr, align);
         unsafe {
-            llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
+            llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint,
                                   llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));
         }
+
+        value
     }
 
-    pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<Align>) -> ValueRef {
+    pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<u32>) -> ValueRef {
         debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
         assert!(!self.llbuilder.is_null());
         self.count_insn("store");
@@ -581,7 +589,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
             if let Some(align) = align {
-                llvm::LLVMSetAlignment(store, align.abi() as c_uint);
+                llvm::LLVMSetAlignment(store, align as c_uint);
             }
             store
         }
@@ -599,16 +607,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef,
-                        order: AtomicOrdering, align: Align) {
+    pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
         debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
         self.count_insn("store.atomic");
         let ptr = self.check_store(val, ptr);
         unsafe {
-            let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order);
-            // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here?
-            // Also see `atomic_load` for more context.
-            llvm::LLVMSetAlignment(store, align.pref() as c_uint);
+            let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
+            let align = llalign_of_pref(self.ccx, ty.element_type());
+            llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint);
         }
     }
 
@@ -620,6 +626,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
+    // Simple wrapper around GEP that takes an array of ints and wraps them
+    // in C_i32()
+    #[inline]
+    pub fn gepi(&self, base: ValueRef, ixs: &[usize]) -> ValueRef {
+        // Small vector optimization. This should catch 100% of the cases that
+        // we care about.
+        if ixs.len() < 16 {
+            let mut small_vec = [ C_i32(self.ccx, 0); 16 ];
+            for (small_vec_e, &ix) in small_vec.iter_mut().zip(ixs) {
+                *small_vec_e = C_i32(self.ccx, ix as i32);
+            }
+            self.inbounds_gep(base, &small_vec[..ixs.len()])
+        } else {
+            let v = ixs.iter().map(|i| C_i32(self.ccx, *i as i32)).collect::<Vec<ValueRef>>();
+            self.count_insn("gepi");
+            self.inbounds_gep(base, &v)
+        }
+    }
+
     pub fn inbounds_gep(&self, ptr: ValueRef, indices: &[ValueRef]) -> ValueRef {
         self.count_insn("inboundsgep");
         unsafe {
@@ -628,9 +653,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    pub fn struct_gep(&self, ptr: ValueRef, idx: u64) -> ValueRef {
+    pub fn struct_gep(&self, ptr: ValueRef, idx: usize) -> ValueRef {
         self.count_insn("structgep");
-        assert_eq!(idx as c_uint as u64, idx);
         unsafe {
             llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname())
         }
@@ -936,18 +960,16 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
         }
     }
 
-    pub fn extract_value(&self, agg_val: ValueRef, idx: u64) -> ValueRef {
+    pub fn extract_value(&self, agg_val: ValueRef, idx: usize) -> ValueRef {
         self.count_insn("extractvalue");
-        assert_eq!(idx as c_uint as u64, idx);
         unsafe {
             llvm::LLVMBuildExtractValue(self.llbuilder, agg_val, idx as c_uint, noname())
         }
     }
 
     pub fn insert_value(&self, agg_val: ValueRef, elt: ValueRef,
-                       idx: u64) -> ValueRef {
+                       idx: usize) -> ValueRef {
         self.count_insn("insertvalue");
-        assert_eq!(idx as c_uint as u64, idx);
         unsafe {
             llvm::LLVMBuildInsertValue(self.llbuilder, agg_val, elt, idx as c_uint,
                                        noname())
@@ -1129,12 +1151,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 
     pub fn add_case(&self, s: ValueRef, on_val: ValueRef, dest: BasicBlockRef) {
         unsafe {
+            if llvm::LLVMIsUndef(s) == llvm::True { return; }
             llvm::LLVMAddCase(s, on_val, dest)
         }
     }
 
     pub fn add_incoming_to_phi(&self, phi: ValueRef, val: ValueRef, bb: BasicBlockRef) {
         unsafe {
+            if llvm::LLVMIsUndef(phi) == llvm::True { return; }
             llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint);
         }
     }
@@ -1209,36 +1233,4 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
 
         return Cow::Owned(casted_args);
     }
-
-    pub fn lifetime_start(&self, ptr: ValueRef, size: Size) {
-        self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size);
-    }
-
-    pub fn lifetime_end(&self, ptr: ValueRef, size: Size) {
-        self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size);
-    }
-
-    /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
-    /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
-    /// and the intrinsic for `lt` and passes them to `emit`, which is in
-    /// charge of generating code to call the passed intrinsic on whatever
-    /// block of generated code is targetted for the intrinsic.
-    ///
-    /// If LLVM lifetime intrinsic support is disabled (i.e.  optimizations
-    /// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
-    fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ValueRef, size: Size) {
-        if self.ccx.sess().opts.optimize == config::OptLevel::No {
-            return;
-        }
-
-        let size = size.bytes();
-        if size == 0 {
-            return;
-        }
-
-        let lifetime_intrinsic = self.ccx.get_intrinsic(intrinsic);
-
-        let ptr = self.pointercast(ptr, Type::i8p(self.ccx));
-        self.call(lifetime_intrinsic, &[C_u64(self.ccx, size), ptr], None);
-    }
 }
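Editor's note: the restored `gepi` stages its indices in a fixed 16-slot stack array on the common path and only falls back to a heap `Vec` for longer index lists. The same small-buffer pattern in isolation (`Const` stands in for an LLVM i32 constant):

    #[derive(Clone, Copy, PartialEq, Debug)]
    struct Const(i32);

    fn make_consts(ixs: &[usize]) -> Vec<Const> {
        if ixs.len() < 16 {
            // common case: no heap allocation while building the indices
            let mut small = [Const(0); 16];
            for (slot, &ix) in small.iter_mut().zip(ixs) {
                *slot = Const(ix as i32);
            }
            small[..ixs.len()].to_vec() // the real code passes the slice on directly
        } else {
            ixs.iter().map(|&ix| Const(ix as i32)).collect()
        }
    }

    fn main() {
        assert_eq!(make_consts(&[0, 2]), vec![Const(0), Const(2)]);
    }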
diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs
index d5f341f9685..bf842e6358f 100644
--- a/src/librustc_trans/cabi_aarch64.rs
+++ b/src/librustc_trans/cabi_aarch64.rs
@@ -14,7 +14,7 @@ use context::CrateContext;
 fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
                                      -> Option<Uniform> {
     arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
-        let size = arg.layout.size;
+        let size = arg.layout.size(ccx);
 
         // Ensure we have at most four uniquely addressable members.
         if size > unit.size.checked_mul(4, ccx).unwrap() {
@@ -44,10 +44,10 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
         return;
     }
     if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) {
-        ret.cast_to(uniform);
+        ret.cast_to(ccx, uniform);
         return;
     }
-    let size = ret.layout.size;
+    let size = ret.layout.size(ccx);
     let bits = size.bits();
     if bits <= 128 {
         let unit = if bits <= 8 {
@@ -60,13 +60,13 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
             Reg::i64()
         };
 
-        ret.cast_to(Uniform {
+        ret.cast_to(ccx, Uniform {
             unit,
             total: size
         });
         return;
     }
-    ret.make_indirect();
+    ret.make_indirect(ccx);
 }
 
 fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
@@ -75,10 +75,10 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
         return;
     }
     if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) {
-        arg.cast_to(uniform);
+        arg.cast_to(ccx, uniform);
         return;
     }
-    let size = arg.layout.size;
+    let size = arg.layout.size(ccx);
     let bits = size.bits();
     if bits <= 128 {
         let unit = if bits <= 8 {
@@ -91,13 +91,13 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
             Reg::i64()
         };
 
-        arg.cast_to(Uniform {
+        arg.cast_to(ccx, Uniform {
             unit,
             total: size
         });
         return;
     }
-    arg.make_indirect();
+    arg.make_indirect(ccx);
 }
 
 pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
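Editor's note: both AArch64 classifiers above bucket small (at most 128-bit) values by picking the narrowest register unit that covers them, then casting to a uniform array of that unit. The bucketing on its own (`unit_bits` is a hypothetical helper):

    // Smallest AArch64 integer unit, in bits, that covers a value.
    fn unit_bits(bits: u64) -> u64 {
        if bits <= 8 { 8 }
        else if bits <= 16 { 16 }
        else if bits <= 32 { 32 }
        else { 64 }
    }

    fn main() {
        assert_eq!(unit_bits(1), 8);
        assert_eq!(unit_bits(24), 32);
        assert_eq!(unit_bits(96), 64); // a 96-bit value: an array of i64 units
    }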
diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs
index 438053d63b5..635741b4d1a 100644
--- a/src/librustc_trans/cabi_arm.rs
+++ b/src/librustc_trans/cabi_arm.rs
@@ -15,7 +15,7 @@ use llvm::CallConv;
 fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
                                      -> Option<Uniform> {
     arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
-        let size = arg.layout.size;
+        let size = arg.layout.size(ccx);
 
         // Ensure we have at most four uniquely addressable members.
         if size > unit.size.checked_mul(4, ccx).unwrap() {
@@ -47,12 +47,12 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
 
     if vfp {
         if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) {
-            ret.cast_to(uniform);
+            ret.cast_to(ccx, uniform);
             return;
         }
     }
 
-    let size = ret.layout.size;
+    let size = ret.layout.size(ccx);
     let bits = size.bits();
     if bits <= 32 {
         let unit = if bits <= 8 {
@@ -62,13 +62,13 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
         } else {
             Reg::i32()
         };
-        ret.cast_to(Uniform {
+        ret.cast_to(ccx, Uniform {
             unit,
             total: size
         });
         return;
     }
-    ret.make_indirect();
+    ret.make_indirect(ccx);
 }
 
 fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) {
@@ -79,14 +79,14 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
 
     if vfp {
         if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) {
-            arg.cast_to(uniform);
+            arg.cast_to(ccx, uniform);
             return;
         }
     }
 
-    let align = arg.layout.align.abi();
-    let total = arg.layout.size;
-    arg.cast_to(Uniform {
+    let align = arg.layout.align(ccx).abi();
+    let total = arg.layout.size(ccx);
+    arg.cast_to(ccx, Uniform {
         unit: if align <= 4 { Reg::i32() } else { Reg::i64() },
         total
     });
diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs
index 1664251cf89..6fcd3ed581d 100644
--- a/src/librustc_trans/cabi_asmjs.rs
+++ b/src/librustc_trans/cabi_asmjs.rs
@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{FnType, ArgType, LayoutExt, Uniform};
+use abi::{FnType, ArgType, ArgAttribute, LayoutExt, Uniform};
 use context::CrateContext;
 
 // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
@@ -19,9 +19,9 @@ use context::CrateContext;
 fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if ret.layout.is_aggregate() {
         if let Some(unit) = ret.layout.homogeneous_aggregate(ccx) {
-            let size = ret.layout.size;
+            let size = ret.layout.size(ccx);
             if unit.size == size {
-                ret.cast_to(Uniform {
+                ret.cast_to(ccx, Uniform {
                     unit,
                     total: size
                 });
@@ -29,13 +29,14 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
             }
         }
 
-        ret.make_indirect();
+        ret.make_indirect(ccx);
     }
 }
 
-fn classify_arg_ty(arg: &mut ArgType) {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
     if arg.layout.is_aggregate() {
-        arg.make_indirect_byval();
+        arg.make_indirect(ccx);
+        arg.attrs.set(ArgAttribute::ByVal);
     }
 }
 
@@ -46,6 +47,6 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
 
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(arg);
+        classify_arg_ty(ccx, arg);
     }
 }
diff --git a/src/librustc_trans/cabi_hexagon.rs b/src/librustc_trans/cabi_hexagon.rs
index 7e7e483fea0..1acda72675c 100644
--- a/src/librustc_trans/cabi_hexagon.rs
+++ b/src/librustc_trans/cabi_hexagon.rs
@@ -11,32 +11,33 @@
 #![allow(non_upper_case_globals)]
 
 use abi::{FnType, ArgType, LayoutExt};
+use context::CrateContext;
 
-fn classify_ret_ty(ret: &mut ArgType) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
-        ret.make_indirect();
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 {
+        ret.make_indirect(ccx);
     } else {
         ret.extend_integer_width_to(32);
     }
 }
 
-fn classify_arg_ty(arg: &mut ArgType) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
-        arg.make_indirect();
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 {
+        arg.make_indirect(ccx);
     } else {
         arg.extend_integer_width_to(32);
     }
 }
 
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(&mut fty.ret);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg_ty(arg);
+        classify_arg_ty(ccx, arg);
     }
 }
diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs
index fe61670a108..b7b60859d4a 100644
--- a/src/librustc_trans/cabi_mips.rs
+++ b/src/librustc_trans/cabi_mips.rs
@@ -8,48 +8,45 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
+use std::cmp;
+use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
 
-use rustc::ty::layout::Size;
-
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                             ret: &mut ArgType<'tcx>,
-                             offset: &mut Size) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
-        ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        ret.make_indirect(ccx);
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
-    let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
+    align = cmp::min(cmp::max(align, 4), 8);
 
     if arg.layout.is_aggregate() {
-        arg.cast_to(Uniform {
+        arg.cast_to(ccx, Uniform {
             unit: Reg::i32(),
             total: size
         });
-        if !offset.is_abi_aligned(align) {
-            arg.pad_with(Reg::i32());
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i32());
         }
     } else {
         arg.extend_integer_width_to(32);
     }
 
-    *offset = offset.abi_align(align) + size.abi_align(align);
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }
 
 pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
-    let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
+    let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
         classify_arg_ty(ccx, arg, &mut offset);
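Editor's note: the rewritten MIPS code tracks the stack offset by hand: alignment is clamped into [4, 8], a padding register is inserted when the running offset is misaligned, and the offset then advances by the aligned size. A standalone check of that arithmetic (`step` is a hypothetical wrapper around the loop body):

    fn align_up_to(x: u64, a: u64) -> u64 {
        (x + a - 1) / a * a
    }

    // Advances the running argument offset; returns whether padding was
    // needed. `align` must be a power of two for the mask test to work,
    // which the [4, 8] clamp guarantees.
    fn step(offset: &mut u64, size: u64, mut align: u64) -> bool {
        align = align.max(4).min(8);
        let needs_pad = ((align - 1) & *offset) > 0;
        *offset = align_up_to(*offset, align);
        *offset += align_up_to(size, align);
        needs_pad
    }

    fn main() {
        let mut off = 4;               // e.g. after an indirect-return slot
        assert!(step(&mut off, 8, 8)); // 8-aligned argument at offset 4: pad
        assert_eq!(off, 16);           // rounded up to 8, plus 8 bytes
    }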
diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs
index 16d0cfe072d..dff75e628de 100644
--- a/src/librustc_trans/cabi_mips64.rs
+++ b/src/librustc_trans/cabi_mips64.rs
@@ -8,48 +8,45 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
+use std::cmp;
+use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
 
-use rustc::ty::layout::Size;
-
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                             ret: &mut ArgType<'tcx>,
-                             offset: &mut Size) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
     } else {
-        ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        ret.make_indirect(ccx);
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
-    let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
+    align = cmp::min(cmp::max(align, 4), 8);
 
     if arg.layout.is_aggregate() {
-        arg.cast_to(Uniform {
+        arg.cast_to(ccx, Uniform {
             unit: Reg::i64(),
             total: size
         });
-        if !offset.is_abi_aligned(align) {
-            arg.pad_with(Reg::i64());
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i64());
         }
     } else {
         arg.extend_integer_width_to(64);
     }
 
-    *offset = offset.abi_align(align) + size.abi_align(align);
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }
 
 pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
-    let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
+    let mut offset = if fty.ret.is_indirect() { 8 } else { 0 };
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
         classify_arg_ty(ccx, arg, &mut offset);
diff --git a/src/librustc_trans/cabi_msp430.rs b/src/librustc_trans/cabi_msp430.rs
index d270886a19c..546bb5ad9b4 100644
--- a/src/librustc_trans/cabi_msp430.rs
+++ b/src/librustc_trans/cabi_msp430.rs
@@ -12,6 +12,7 @@
 // http://www.ti.com/lit/an/slaa534/slaa534.pdf
 
 use abi::{ArgType, FnType, LayoutExt};
+use context::CrateContext;
 
 // 3.5 Structures or Unions Passed and Returned by Reference
 //
@@ -19,31 +20,31 @@ use abi::{ArgType, FnType, LayoutExt};
 // returned by reference. To pass a structure or union by reference, the caller
 // places its address in the appropriate location: either in a register or on
 // the stack, according to its position in the argument list. (..)"
-fn classify_ret_ty(ret: &mut ArgType) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
-        ret.make_indirect();
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 {
+        ret.make_indirect(ccx);
     } else {
         ret.extend_integer_width_to(16);
     }
 }
 
-fn classify_arg_ty(arg: &mut ArgType) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
-        arg.make_indirect();
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 {
+        arg.make_indirect(ccx);
     } else {
         arg.extend_integer_width_to(16);
     }
 }
 
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(&mut fty.ret);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg_ty(arg);
+        classify_arg_ty(ccx, arg);
     }
 }
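
The MSP430 rule above is a simple size threshold: aggregates wider than
32 bits travel by reference, and everything else is passed directly,
with small integers widened to the 16-bit register size. A standalone
sketch of just the by-reference decision (illustrative, not compiler
code):

fn passes_by_reference(is_aggregate: bool, size_bits: u64) -> bool {
    is_aggregate && size_bits > 32
}

fn main() {
    assert!(passes_by_reference(true, 64));   // large struct: by reference
    assert!(!passes_by_reference(true, 32));  // 32-bit struct: direct
    assert!(!passes_by_reference(false, 64)); // scalar: direct
}
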
diff --git a/src/librustc_trans/cabi_nvptx.rs b/src/librustc_trans/cabi_nvptx.rs
index 69cfc690a9f..3873752b254 100644
--- a/src/librustc_trans/cabi_nvptx.rs
+++ b/src/librustc_trans/cabi_nvptx.rs
@@ -12,32 +12,33 @@
 // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
 
 use abi::{ArgType, FnType, LayoutExt};
+use context::CrateContext;
 
-fn classify_ret_ty(ret: &mut ArgType) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
-        ret.make_indirect();
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 {
+        ret.make_indirect(ccx);
     } else {
         ret.extend_integer_width_to(32);
     }
 }
 
-fn classify_arg_ty(arg: &mut ArgType) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
-        arg.make_indirect();
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 {
+        arg.make_indirect(ccx);
     } else {
         arg.extend_integer_width_to(32);
     }
 }
 
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(&mut fty.ret);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg_ty(arg);
+        classify_arg_ty(ccx, arg);
     }
 }
diff --git a/src/librustc_trans/cabi_nvptx64.rs b/src/librustc_trans/cabi_nvptx64.rs
index 4d76c156038..24bf4920c16 100644
--- a/src/librustc_trans/cabi_nvptx64.rs
+++ b/src/librustc_trans/cabi_nvptx64.rs
@@ -12,32 +12,33 @@
 // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
 
 use abi::{ArgType, FnType, LayoutExt};
+use context::CrateContext;
 
-fn classify_ret_ty(ret: &mut ArgType) {
-    if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
-        ret.make_indirect();
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 {
+        ret.make_indirect(ccx);
     } else {
         ret.extend_integer_width_to(64);
     }
 }
 
-fn classify_arg_ty(arg: &mut ArgType) {
-    if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
-        arg.make_indirect();
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 {
+        arg.make_indirect(ccx);
     } else {
         arg.extend_integer_width_to(64);
     }
 }
 
-pub fn compute_abi_info(fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(&mut fty.ret);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg_ty(arg);
+        classify_arg_ty(ccx, arg);
     }
 }
diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs
index c3c8c745e3a..f951ac76391 100644
--- a/src/librustc_trans/cabi_powerpc.rs
+++ b/src/librustc_trans/cabi_powerpc.rs
@@ -8,48 +8,46 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
+use abi::{align_up_to, FnType, ArgType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
 
-use rustc::ty::layout::Size;
+use std::cmp;
 
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                             ret: &mut ArgType<'tcx>,
-                             offset: &mut Size) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
-        ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        ret.make_indirect(ccx);
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
-    let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
+    align = cmp::min(cmp::max(align, 4), 8);
 
     if arg.layout.is_aggregate() {
-        arg.cast_to(Uniform {
+        arg.cast_to(ccx, Uniform {
             unit: Reg::i32(),
             total: size
         });
-        if !offset.is_abi_aligned(align) {
-            arg.pad_with(Reg::i32());
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i32());
         }
     } else {
         arg.extend_integer_width_to(32);
     }
 
-    *offset = offset.abi_align(align) + size.abi_align(align);
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }
 
 pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
-    let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
+    let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
         classify_arg_ty(ccx, arg, &mut offset);
diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs
index 2206a4fa00c..fb5472eb6ae 100644
--- a/src/librustc_trans/cabi_powerpc64.rs
+++ b/src/librustc_trans/cabi_powerpc64.rs
@@ -28,23 +28,25 @@ fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                       abi: ABI)
                                      -> Option<Uniform> {
     arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
+        let size = arg.layout.size(ccx);
+
         // ELFv1 only passes one-member aggregates transparently.
         // ELFv2 passes up to eight uniquely addressable members.
-        if (abi == ELFv1 && arg.layout.size > unit.size)
-                || arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() {
+        if (abi == ELFv1 && size > unit.size)
+                || size > unit.size.checked_mul(8, ccx).unwrap() {
             return None;
         }
 
         let valid_unit = match unit.kind {
             RegKind::Integer => false,
             RegKind::Float => true,
-            RegKind::Vector => arg.layout.size.bits() == 128
+            RegKind::Vector => size.bits() == 128
         };
 
         if valid_unit {
             Some(Uniform {
                 unit,
-                total: arg.layout.size
+                total: size
             })
         } else {
             None
@@ -60,16 +62,16 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
 
     // The ELFv1 ABI doesn't return aggregates in registers
     if abi == ELFv1 {
-        ret.make_indirect();
+        ret.make_indirect(ccx);
         return;
     }
 
     if let Some(uniform) = is_homogeneous_aggregate(ccx, ret, abi) {
-        ret.cast_to(uniform);
+        ret.cast_to(ccx, uniform);
         return;
     }
 
-    let size = ret.layout.size;
+    let size = ret.layout.size(ccx);
     let bits = size.bits();
     if bits <= 128 {
         let unit = if bits <= 8 {
@@ -82,14 +84,14 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
             Reg::i64()
         };
 
-        ret.cast_to(Uniform {
+        ret.cast_to(ccx, Uniform {
             unit,
             total: size
         });
         return;
     }
 
-    ret.make_indirect();
+    ret.make_indirect(ccx);
 }
 
 fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) {
@@ -99,11 +101,11 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
     }
 
     if let Some(uniform) = is_homogeneous_aggregate(ccx, arg, abi) {
-        arg.cast_to(uniform);
+        arg.cast_to(ccx, uniform);
         return;
     }
 
-    let size = arg.layout.size;
+    let size = arg.layout.size(ccx);
     let (unit, total) = match abi {
         ELFv1 => {
             // In ELFv1, aggregates smaller than a doubleword should appear in
@@ -122,7 +124,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
         },
     };
 
-    arg.cast_to(Uniform {
+    arg.cast_to(ccx, Uniform {
         unit,
         total
     });
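
The two ABIs above differ only in how many members a homogeneous
aggregate may have: ELFv1 passes just one-member aggregates
transparently, while ELFv2 allows up to eight uniquely addressable
members, and integer units never qualify. A self-contained model of
that predicate (names and shapes are hypothetical; the real check
works on layout data):

#[derive(Clone, Copy, PartialEq)]
enum Abi { ELFv1, ELFv2 }

#[derive(Clone, Copy)]
enum Kind { Integer, Float, Vector }

// Returns (unit_bits, total_bits) for a qualifying homogeneous
// aggregate, or None if it must be passed some other way.
fn homogeneous_uniform(abi: Abi, total_bits: u64, unit: Kind, unit_bits: u64)
                       -> Option<(u64, u64)> {
    if (abi == Abi::ELFv1 && total_bits > unit_bits)
            || total_bits > unit_bits * 8 {
        return None;
    }
    let valid = match unit {
        Kind::Integer => false,            // integer units never qualify
        Kind::Float => true,
        Kind::Vector => total_bits == 128, // only full 128-bit vectors
    };
    if valid { Some((unit_bits, total_bits)) } else { None }
}

fn main() {
    // [f64; 4]: homogeneous under ELFv2, too many members for ELFv1.
    assert!(homogeneous_uniform(Abi::ELFv2, 256, Kind::Float, 64).is_some());
    assert!(homogeneous_uniform(Abi::ELFv1, 256, Kind::Float, 64).is_none());
    assert!(homogeneous_uniform(Abi::ELFv2, 64, Kind::Integer, 64).is_none());
    assert!(homogeneous_uniform(Abi::ELFv2, 128, Kind::Vector, 128).is_some());
}
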
diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs
index 9fb460043ae..fedebea3f4c 100644
--- a/src/librustc_trans/cabi_s390x.rs
+++ b/src/librustc_trans/cabi_s390x.rs
@@ -14,27 +14,23 @@
 use abi::{FnType, ArgType, LayoutExt, Reg};
 use context::CrateContext;
 
-use rustc::ty::layout::{self, TyLayout};
+use rustc::ty::layout::{self, Layout, TyLayout};
 
-fn classify_ret_ty(ret: &mut ArgType) {
-    if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 {
         ret.extend_integer_width_to(64);
     } else {
-        ret.make_indirect();
+        ret.make_indirect(ccx);
     }
 }
 
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                   layout: TyLayout<'tcx>) -> bool {
-    match layout.abi {
-        layout::Abi::Scalar(ref scalar) => {
-            match scalar.value {
-                layout::F32 | layout::F64 => true,
-                _ => false
-            }
-        }
-        layout::Abi::Aggregate { .. } => {
-            if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
+    match *layout {
+        Layout::Scalar { value: layout::F32, .. } |
+        Layout::Scalar { value: layout::F64, .. } => true,
+        Layout::Univariant { .. } => {
+            if layout.field_count() == 1 {
                 is_single_fp_element(ccx, layout.field(ccx, 0))
             } else {
                 false
@@ -45,31 +41,32 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 }
 
 fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
-    if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
+    let size = arg.layout.size(ccx);
+    if !arg.layout.is_aggregate() && size.bits() <= 64 {
         arg.extend_integer_width_to(64);
         return;
     }
 
     if is_single_fp_element(ccx, arg.layout) {
-        match arg.layout.size.bytes() {
-            4 => arg.cast_to(Reg::f32()),
-            8 => arg.cast_to(Reg::f64()),
-            _ => arg.make_indirect()
+        match size.bytes() {
+            4 => arg.cast_to(ccx, Reg::f32()),
+            8 => arg.cast_to(ccx, Reg::f64()),
+            _ => arg.make_indirect(ccx)
         }
     } else {
-        match arg.layout.size.bytes() {
-            1 => arg.cast_to(Reg::i8()),
-            2 => arg.cast_to(Reg::i16()),
-            4 => arg.cast_to(Reg::i32()),
-            8 => arg.cast_to(Reg::i64()),
-            _ => arg.make_indirect()
+        match size.bytes() {
+            1 => arg.cast_to(ccx, Reg::i8()),
+            2 => arg.cast_to(ccx, Reg::i16()),
+            4 => arg.cast_to(ccx, Reg::i32()),
+            8 => arg.cast_to(ccx, Reg::i64()),
+            _ => arg.make_indirect(ccx)
         }
     }
 }
 
 pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(&mut fty.ret);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
     for arg in &mut fty.args {
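
The s390x return/argument rules above hinge on `is_single_fp_element`,
which unwraps single-field aggregates until it either reaches a lone
f32/f64 (pass in a float register) or proves the type is something
else. A standalone model of that recursion (hypothetical toy types,
not the compiler's layout machinery):

enum Ty {
    F32,
    F64,
    Int,
    Struct(Vec<Ty>),
}

fn is_single_fp_element(ty: &Ty) -> bool {
    match ty {
        Ty::F32 | Ty::F64 => true,
        // A one-field wrapper is as "fp" as its only field.
        Ty::Struct(fields) if fields.len() == 1 => is_single_fp_element(&fields[0]),
        _ => false,
    }
}

fn main() {
    let nested = Ty::Struct(vec![Ty::Struct(vec![Ty::F64])]);
    assert!(is_single_fp_element(&nested));
    assert!(!is_single_fp_element(&Ty::Struct(vec![Ty::F32, Ty::F32])));
    assert!(!is_single_fp_element(&Ty::Struct(vec![Ty::Int])));
}
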
diff --git a/src/librustc_trans/cabi_sparc.rs b/src/librustc_trans/cabi_sparc.rs
index fe61670a108..c17901e1ade 100644
--- a/src/librustc_trans/cabi_sparc.rs
+++ b/src/librustc_trans/cabi_sparc.rs
@@ -8,48 +8,45 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgType, FnType, LayoutExt, Reg, Uniform};
+use std::cmp;
+use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
 
-use rustc::ty::layout::Size;
-
-fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                             ret: &mut ArgType<'tcx>,
-                             offset: &mut Size) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
-        ret.make_indirect();
-        *offset += ccx.tcx().data_layout.pointer_size;
+        ret.make_indirect(ccx);
     }
 }
 
-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
-    let dl = &ccx.tcx().data_layout;
-    let size = arg.layout.size;
-    let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
+    align = cmp::min(cmp::max(align, 4), 8);
 
     if arg.layout.is_aggregate() {
-        arg.cast_to(Uniform {
+        arg.cast_to(ccx, Uniform {
             unit: Reg::i32(),
             total: size
         });
-        if !offset.is_abi_aligned(align) {
-            arg.pad_with(Reg::i32());
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i32());
         }
     } else {
-        arg.extend_integer_width_to(32);
+        arg.extend_integer_width_to(32)
     }
 
-    *offset = offset.abi_align(align) + size.abi_align(align);
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }
 
 pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
-    let mut offset = Size::from_bytes(0);
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, &mut offset);
+        classify_ret_ty(ccx, &mut fty.ret);
     }
 
+    let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
         classify_arg_ty(ccx, arg, &mut offset);
diff --git a/src/librustc_trans/cabi_sparc64.rs b/src/librustc_trans/cabi_sparc64.rs
index 7c52e27fa67..8383007550e 100644
--- a/src/librustc_trans/cabi_sparc64.rs
+++ b/src/librustc_trans/cabi_sparc64.rs
@@ -16,21 +16,23 @@ use context::CrateContext;
 fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
                                      -> Option<Uniform> {
     arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
+        let size = arg.layout.size(ccx);
+
         // Ensure we have at most eight uniquely addressable members.
-        if arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() {
+        if size > unit.size.checked_mul(8, ccx).unwrap() {
             return None;
         }
 
         let valid_unit = match unit.kind {
             RegKind::Integer => false,
             RegKind::Float => true,
-            RegKind::Vector => arg.layout.size.bits() == 128
+            RegKind::Vector => size.bits() == 128
         };
 
         if valid_unit {
             Some(Uniform {
                 unit,
-                total: arg.layout.size
+                total: size
             })
         } else {
             None
@@ -45,10 +47,10 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
     }
 
     if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) {
-        ret.cast_to(uniform);
+        ret.cast_to(ccx, uniform);
         return;
     }
-    let size = ret.layout.size;
+    let size = ret.layout.size(ccx);
     let bits = size.bits();
     if bits <= 128 {
         let unit = if bits <= 8 {
@@ -61,7 +63,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
             Reg::i64()
         };
 
-        ret.cast_to(Uniform {
+        ret.cast_to(ccx, Uniform {
             unit,
             total: size
         });
@@ -69,7 +71,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
     }
 
     // don't return aggregates in registers
-    ret.make_indirect();
+    ret.make_indirect(ccx);
 }
 
 fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
@@ -79,12 +81,12 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
     }
 
     if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) {
-        arg.cast_to(uniform);
+        arg.cast_to(ccx, uniform);
         return;
     }
 
-    let total = arg.layout.size;
-    arg.cast_to(Uniform {
+    let total = arg.layout.size(ccx);
+    arg.cast_to(ccx, Uniform {
         unit: Reg::i64(),
         total
     });
diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs
index 6fd0140c399..49634d6e78c 100644
--- a/src/librustc_trans/cabi_x86.rs
+++ b/src/librustc_trans/cabi_x86.rs
@@ -8,10 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgAttribute, FnType, LayoutExt, PassMode, Reg, RegKind};
+use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind};
 use common::CrateContext;
 
-use rustc::ty::layout::{self, TyLayout};
+use rustc::ty::layout::{self, Layout, TyLayout};
 
 #[derive(PartialEq)]
 pub enum Flavor {
@@ -21,15 +21,11 @@ pub enum Flavor {
 
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                   layout: TyLayout<'tcx>) -> bool {
-    match layout.abi {
-        layout::Abi::Scalar(ref scalar) => {
-            match scalar.value {
-                layout::F32 | layout::F64 => true,
-                _ => false
-            }
-        }
-        layout::Abi::Aggregate { .. } => {
-            if layout.fields.count() == 1 && layout.fields.offset(0).bytes() == 0 {
+    match *layout {
+        Layout::Scalar { value: layout::F32, .. } |
+        Layout::Scalar { value: layout::F64, .. } => true,
+        Layout::Univariant { .. } => {
+            if layout.field_count() == 1 {
                 is_single_fp_element(ccx, layout.field(ccx, 0))
             } else {
                 false
@@ -54,25 +50,27 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
             let t = &ccx.sess().target.target;
             if t.options.is_like_osx || t.options.is_like_windows
                 || t.options.is_like_openbsd {
+                let size = fty.ret.layout.size(ccx);
+
                 // According to Clang, everyone but MSVC returns single-element
                 // float aggregates directly in a floating-point register.
                 if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) {
-                    match fty.ret.layout.size.bytes() {
-                        4 => fty.ret.cast_to(Reg::f32()),
-                        8 => fty.ret.cast_to(Reg::f64()),
-                        _ => fty.ret.make_indirect()
+                    match size.bytes() {
+                        4 => fty.ret.cast_to(ccx, Reg::f32()),
+                        8 => fty.ret.cast_to(ccx, Reg::f64()),
+                        _ => fty.ret.make_indirect(ccx)
                     }
                 } else {
-                    match fty.ret.layout.size.bytes() {
-                        1 => fty.ret.cast_to(Reg::i8()),
-                        2 => fty.ret.cast_to(Reg::i16()),
-                        4 => fty.ret.cast_to(Reg::i32()),
-                        8 => fty.ret.cast_to(Reg::i64()),
-                        _ => fty.ret.make_indirect()
+                    match size.bytes() {
+                        1 => fty.ret.cast_to(ccx, Reg::i8()),
+                        2 => fty.ret.cast_to(ccx, Reg::i16()),
+                        4 => fty.ret.cast_to(ccx, Reg::i32()),
+                        8 => fty.ret.cast_to(ccx, Reg::i64()),
+                        _ => fty.ret.make_indirect(ccx)
                     }
                 }
             } else {
-                fty.ret.make_indirect();
+                fty.ret.make_indirect(ccx);
             }
         } else {
             fty.ret.extend_integer_width_to(32);
@@ -82,7 +80,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
         if arg.layout.is_aggregate() {
-            arg.make_indirect_byval();
+            arg.make_indirect(ccx);
+            arg.attrs.set(ArgAttribute::ByVal);
         } else {
             arg.extend_integer_width_to(32);
         }
@@ -101,24 +100,17 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
         let mut free_regs = 2;
 
         for arg in &mut fty.args {
-            let attrs = match arg.mode {
-                PassMode::Ignore |
-                PassMode::Indirect(_) => continue,
-                PassMode::Direct(ref mut attrs) => attrs,
-                PassMode::Pair(..) |
-                PassMode::Cast(_) => {
-                    bug!("x86 shouldn't be passing arguments by {:?}", arg.mode)
-                }
-            };
+            if arg.is_ignore() || arg.is_indirect() { continue; }
 
             // At this point we know this must be a primitive of sorts.
             let unit = arg.layout.homogeneous_aggregate(ccx).unwrap();
-            assert_eq!(unit.size, arg.layout.size);
+            let size = arg.layout.size(ccx);
+            assert_eq!(unit.size, size);
             if unit.kind == RegKind::Float {
                 continue;
             }
 
-            let size_in_regs = (arg.layout.size.bits() + 31) / 32;
+            let size_in_regs = (size.bits() + 31) / 32;
 
             if size_in_regs == 0 {
                 continue;
@@ -130,8 +122,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 
             free_regs -= size_in_regs;
 
-            if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
-                attrs.set(ArgAttribute::InReg);
+            if size.bits() <= 32 && unit.kind == RegKind::Integer {
+                arg.attrs.set(ArgAttribute::InReg);
             }
 
             if free_regs == 0 {
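
The fastcall path above hands out at most two integer registers; float
arguments never consume them, and only integer arguments of 32 bits or
fewer are actually tagged `InReg`. A standalone model of that
allocation loop (illustrative; the register names and tuple encoding
are assumptions):

#[derive(Clone, Copy, PartialEq)]
enum Kind { Integer, Float }

// For each (kind, size-in-bits) argument, decide whether it gets the
// `InReg` attribute, consuming from the two available registers.
fn allocate_inreg(args: &[(Kind, u64)]) -> Vec<bool> {
    let mut free_regs = 2u64; // ecx, edx
    args.iter().map(|&(kind, bits)| {
        if kind == Kind::Float || free_regs == 0 {
            return false;
        }
        let size_in_regs = (bits + 31) / 32;
        if size_in_regs == 0 || size_in_regs > free_regs {
            return false;
        }
        free_regs -= size_in_regs;
        bits <= 32 // a 64-bit integer uses both regs but is not InReg
    }).collect()
}

fn main() {
    let args = [(Kind::Integer, 32), (Kind::Float, 32), (Kind::Integer, 32)];
    assert_eq!(allocate_inreg(&args), vec![true, false, true]);
}
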
diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs
index 81eb362ca46..a814f458e12 100644
--- a/src/librustc_trans/cabi_x86_64.rs
+++ b/src/librustc_trans/cabi_x86_64.rs
@@ -11,10 +11,10 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
 
-use abi::{ArgType, CastTarget, FnType, LayoutExt, Reg, RegKind};
+use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind};
 use context::CrateContext;
 
-use rustc::ty::layout::{self, TyLayout, Size};
+use rustc::ty::layout::{self, Layout, TyLayout, Size};
 
 #[derive(Clone, Copy, PartialEq, Debug)]
 enum Class {
@@ -34,9 +34,9 @@ const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
 fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
                           -> Result<[Class; MAX_EIGHTBYTES], Memory> {
     fn unify(cls: &mut [Class],
-             off: Size,
+             off: u64,
              c: Class) {
-        let i = (off.bytes() / 8) as usize;
+        let i = (off / 8) as usize;
         let to_write = match (cls[i], c) {
             (Class::None, _) => c,
             (_, Class::None) => return,
@@ -55,21 +55,20 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
     fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                           layout: TyLayout<'tcx>,
                           cls: &mut [Class],
-                          off: Size)
+                          off: u64)
                           -> Result<(), Memory> {
-        if !off.is_abi_aligned(layout.align) {
-            if !layout.is_zst() {
+        if off % layout.align(ccx).abi() != 0 {
+            if layout.size(ccx).bytes() > 0 {
                 return Err(Memory);
             }
             return Ok(());
         }
 
-        match layout.abi {
-            layout::Abi::Uninhabited => {}
-
-            layout::Abi::Scalar(ref scalar) => {
-                let reg = match scalar.value {
-                    layout::Int(..) |
+        match *layout {
+            Layout::Scalar { value, .. } |
+            Layout::RawNullablePointer { value, .. } => {
+                let reg = match value {
+                    layout::Int(_) |
                     layout::Pointer => Class::Int,
                     layout::F32 |
                     layout::F64 => Class::Sse
@@ -77,43 +76,59 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
                 unify(cls, off, reg);
             }
 
-            layout::Abi::Vector => {
+            Layout::CEnum { .. } => {
+                unify(cls, off, Class::Int);
+            }
+
+            Layout::Vector { element, count } => {
                 unify(cls, off, Class::Sse);
 
                 // everything after the first one is the upper
                 // half of a register.
-                for i in 1..layout.fields.count() {
-                    let field_off = off + layout.fields.offset(i);
-                    unify(cls, field_off, Class::SseUp);
+                let eltsz = element.size(ccx).bytes();
+                for i in 1..count {
+                    unify(cls, off + i * eltsz, Class::SseUp);
                 }
             }
 
-            layout::Abi::ScalarPair(..) |
-            layout::Abi::Aggregate { .. } => {
-                match layout.variants {
-                    layout::Variants::Single { .. } => {
-                        for i in 0..layout.fields.count() {
-                            let field_off = off + layout.fields.offset(i);
-                            classify(ccx, layout.field(ccx, i), cls, field_off)?;
-                        }
+            Layout::Array { count, .. } => {
+                if count > 0 {
+                    let elt = layout.field(ccx, 0);
+                    let eltsz = elt.size(ccx).bytes();
+                    for i in 0..count {
+                        classify(ccx, elt, cls, off + i * eltsz)?;
                     }
-                    layout::Variants::Tagged { .. } |
-                    layout::Variants::NicheFilling { .. } => return Err(Memory),
                 }
             }
 
+            Layout::Univariant { ref variant, .. } => {
+                for i in 0..layout.field_count() {
+                    let field_off = off + variant.offsets[i].bytes();
+                    classify(ccx, layout.field(ccx, i), cls, field_off)?;
+                }
+            }
+
+            Layout::UntaggedUnion { .. } => {
+                for i in 0..layout.field_count() {
+                    classify(ccx, layout.field(ccx, i), cls, off)?;
+                }
+            }
+
+            Layout::FatPointer { .. } |
+            Layout::General { .. } |
+            Layout::StructWrappedNullablePointer { .. } => return Err(Memory)
         }
 
         Ok(())
     }
 
-    let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
+    let n = ((arg.layout.size(ccx).bytes() + 7) / 8) as usize;
     if n > MAX_EIGHTBYTES {
         return Err(Memory);
     }
 
     let mut cls = [Class::None; MAX_EIGHTBYTES];
-    classify(ccx, arg.layout, &mut cls, Size::from_bytes(0))?;
+    classify(ccx, arg.layout, &mut cls, 0)?;
     if n > 2 {
         if cls[0] != Class::Sse {
             return Err(Memory);
@@ -138,7 +153,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
     Ok(cls)
 }
 
-fn reg_component(cls: &[Class], i: &mut usize, size: Size) -> Option<Reg> {
+fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option<Reg> {
     if *i >= cls.len() {
         return None;
     }
@@ -147,7 +162,7 @@ fn reg_component(cls: &[Class], i: &mut usize, size: Size) -> Option<Reg> {
         Class::None => None,
         Class::Int => {
             *i += 1;
-            Some(match size.bytes() {
+            Some(match size {
                 1 => Reg::i8(),
                 2 => Reg::i16(),
                 3 |
@@ -159,14 +174,14 @@ fn reg_component(cls: &[Class], i: &mut usize, size: Size) -> Option<Reg> {
             let vec_len = 1 + cls[*i+1..].iter().take_while(|&&c| c == Class::SseUp).count();
             *i += vec_len;
             Some(if vec_len == 1 {
-                match size.bytes() {
+                match size {
                     4 => Reg::f32(),
                     _ => Reg::f64()
                 }
             } else {
                 Reg {
                     kind: RegKind::Vector,
-                    size: Size::from_bytes(8) * (vec_len as u64)
+                    size: Size::from_bytes(vec_len as u64 * 8)
                 }
             })
         }
@@ -174,17 +189,17 @@ fn reg_component(cls: &[Class], i: &mut usize, size: Size) -> Option<Reg> {
     }
 }
 
-fn cast_target(cls: &[Class], size: Size) -> CastTarget {
+fn cast_target(cls: &[Class], size: u64) -> CastTarget {
     let mut i = 0;
     let lo = reg_component(cls, &mut i, size).unwrap();
-    let offset = Size::from_bytes(8) * (i as u64);
+    let offset = i as u64 * 8;
     let target = if size <= offset {
         CastTarget::from(lo)
     } else {
         let hi = reg_component(cls, &mut i, size - offset).unwrap();
         CastTarget::Pair(lo, hi)
     };
-    assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None);
+    assert_eq!(reg_component(cls, &mut i, 0), None);
     target
 }
 
@@ -214,11 +229,11 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
         };
 
         if in_mem {
+            arg.make_indirect(ccx);
             if is_arg {
-                arg.make_indirect_byval();
+                arg.attrs.set(ArgAttribute::ByVal);
             } else {
                // `sret` parameter, so one less integer register is available
-                arg.make_indirect();
                 int_regs -= 1;
             }
         } else {
@@ -227,8 +242,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
             sse_regs -= needed_sse;
 
             if arg.layout.is_aggregate() {
-                let size = arg.layout.size;
-                arg.cast_to(cast_target(cls.as_ref().unwrap(), size))
+                let size = arg.layout.size(ccx).bytes();
+                arg.cast_to(ccx, cast_target(cls.as_ref().unwrap(), size))
             } else {
                 arg.extend_integer_width_to(32);
             }
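
The heart of the x86_64 code above is eightbyte classification: every
argument is viewed as a sequence of 8-byte chunks, each field's class
is merged into the chunk(s) it overlaps, and the result decides
between integer registers, SSE registers, or memory. A simplified
standalone model of the `unify` merge, reduced to the Int/SSE cases
(illustrative only):

#[derive(Clone, Copy, PartialEq, Debug)]
enum Class { None, Int, Sse }

// Merge class `c` into the eightbyte covering byte offset `off`.
fn unify(cls: &mut [Class], off: u64, c: Class) {
    let i = (off / 8) as usize;
    cls[i] = match (cls[i], c) {
        (Class::None, c) => c,
        (c, Class::None) => c,
        // Int "wins" whenever the two classes disagree.
        (Class::Int, _) | (_, Class::Int) => Class::Int,
        _ => Class::Sse,
    };
}

fn main() {
    // struct { f32, f32, i32 }: the two floats share the first
    // eightbyte (SSE), the i32 lands in the second (Int).
    let mut cls = [Class::None; 2];
    unify(&mut cls, 0, Class::Sse);
    unify(&mut cls, 4, Class::Sse);
    unify(&mut cls, 8, Class::Int);
    assert_eq!(cls, [Class::Sse, Class::Int]);
}
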
diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs
index 473c00120a7..39e728d4e4f 100644
--- a/src/librustc_trans/cabi_x86_win64.rs
+++ b/src/librustc_trans/cabi_x86_win64.rs
@@ -8,36 +8,32 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use abi::{ArgType, FnType, Reg};
+use abi::{ArgType, FnType, LayoutExt, Reg};
+use common::CrateContext;
 
-use rustc::ty::layout;
+use rustc::ty::layout::Layout;
 
 // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
 
-pub fn compute_abi_info(fty: &mut FnType) {
-    let fixup = |a: &mut ArgType| {
-        match a.layout.abi {
-            layout::Abi::Uninhabited => {}
-            layout::Abi::ScalarPair(..) |
-            layout::Abi::Aggregate { .. } => {
-                match a.layout.size.bits() {
-                    8 => a.cast_to(Reg::i8()),
-                    16 => a.cast_to(Reg::i16()),
-                    32 => a.cast_to(Reg::i32()),
-                    64 => a.cast_to(Reg::i64()),
-                    _ => a.make_indirect()
-                }
-            }
-            layout::Abi::Vector => {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+    let fixup = |a: &mut ArgType<'tcx>| {
+        let size = a.layout.size(ccx);
+        if a.layout.is_aggregate() {
+            match size.bits() {
+                8 => a.cast_to(ccx, Reg::i8()),
+                16 => a.cast_to(ccx, Reg::i16()),
+                32 => a.cast_to(ccx, Reg::i32()),
+                64 => a.cast_to(ccx, Reg::i64()),
+                _ => a.make_indirect(ccx)
+            };
+        } else {
+            if let Layout::Vector { .. } = *a.layout {
                 // FIXME(eddyb) there should be a size cap here
                 // (probably what clang calls "illegal vectors").
-            }
-            layout::Abi::Scalar(_) => {
-                if a.layout.size.bytes() > 8 {
-                    a.make_indirect();
-                } else {
-                    a.extend_integer_width_to(32);
-                }
+            } else if size.bytes() > 8 {
+                a.make_indirect(ccx);
+            } else {
+                a.extend_integer_width_to(32);
             }
         }
     };
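
The Win64 fixup above passes an aggregate directly only when its size
is exactly that of an integer register class; any other size goes
indirect, and oversized scalars do too. A one-function sketch of the
aggregate rule (illustrative, not compiler code):

// Direct passing requires an exact 8/16/32/64-bit size; padding or
// odd sizes force the indirect path.
fn aggregate_in_register(size_bits: u64) -> bool {
    match size_bits {
        8 | 16 | 32 | 64 => true,
        _ => false,
    }
}

fn main() {
    assert!(aggregate_in_register(32));   // e.g. a struct of two u16s
    assert!(!aggregate_in_register(24));  // odd size: indirect
    assert!(!aggregate_in_register(128)); // too large: indirect
}
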
diff --git a/src/librustc_trans/callee.rs b/src/librustc_trans/callee.rs
index 4afeac2e8f5..b515c9420bf 100644
--- a/src/librustc_trans/callee.rs
+++ b/src/librustc_trans/callee.rs
@@ -20,14 +20,12 @@ use consts;
 use declare;
 use llvm::{self, ValueRef};
 use monomorphize::Instance;
-use type_of::LayoutLlvmExt;
-
 use rustc::hir::def_id::DefId;
 use rustc::ty::{self, TypeFoldable};
-use rustc::ty::layout::LayoutOf;
 use rustc::traits;
 use rustc::ty::subst::Substs;
 use rustc_back::PanicStrategy;
+use type_of;
 
 /// Translates a reference to a fn/method item, monomorphizing and
 /// inlining as it goes.
@@ -58,7 +56,7 @@ pub fn get_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 
     // Create a fn pointer with the substituted signature.
     let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(ccx, fn_ty));
-    let llptrty = ccx.layout_of(fn_ptr_ty).llvm_type(ccx);
+    let llptrty = type_of::type_of(ccx, fn_ptr_ty);
 
     let llfn = if let Some(llfn) = declare::get_declared_value(ccx, &sym) {
         // This is subtle and surprising, but sometimes we have to bitcast
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 7bd8a0c81ee..e3856cabcf9 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -18,17 +18,17 @@ use llvm::{True, False, Bool, OperandBundleDef};
 use rustc::hir::def_id::DefId;
 use rustc::hir::map::DefPathData;
 use rustc::middle::lang_items::LangItem;
-use abi;
 use base;
 use builder::Builder;
 use consts;
 use declare;
+use machine;
+use monomorphize;
 use type_::Type;
-use type_of::LayoutLlvmExt;
 use value::Value;
 use rustc::traits;
 use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::{HasDataLayout, LayoutOf};
+use rustc::ty::layout::{Layout, LayoutTyper};
 use rustc::ty::subst::{Kind, Subst, Substs};
 use rustc::hir;
 
@@ -41,6 +41,105 @@ use syntax_pos::{Span, DUMMY_SP};
 
 pub use context::{CrateContext, SharedCrateContext};
 
+pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
+    if let Layout::FatPointer { .. } = *ccx.layout_of(ty) {
+        true
+    } else {
+        false
+    }
+}
+
+pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
+    let layout = ccx.layout_of(ty);
+    match *layout {
+        Layout::CEnum { .. } |
+        Layout::Scalar { .. } |
+        Layout::Vector { .. } => true,
+
+        Layout::FatPointer { .. } => false,
+
+        Layout::Array { .. } |
+        Layout::Univariant { .. } |
+        Layout::General { .. } |
+        Layout::UntaggedUnion { .. } |
+        Layout::RawNullablePointer { .. } |
+        Layout::StructWrappedNullablePointer { .. } => {
+            !layout.is_unsized() && layout.size(ccx).bytes() == 0
+        }
+    }
+}
+
+/// Returns Some([a, b]) if the type has a pair of fields with types a and b.
+pub fn type_pair_fields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
+                                  -> Option<[Ty<'tcx>; 2]> {
+    match ty.sty {
+        ty::TyAdt(adt, substs) => {
+            assert_eq!(adt.variants.len(), 1);
+            let fields = &adt.variants[0].fields;
+            if fields.len() != 2 {
+                return None;
+            }
+            Some([monomorphize::field_ty(ccx.tcx(), substs, &fields[0]),
+                  monomorphize::field_ty(ccx.tcx(), substs, &fields[1])])
+        }
+        ty::TyClosure(def_id, substs) => {
+            let mut tys = substs.upvar_tys(def_id, ccx.tcx());
+            tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| {
+                if tys.next().is_some() {
+                    None
+                } else {
+                    Some([first_ty, second_ty])
+                }
+            }))
+        }
+        ty::TyGenerator(def_id, substs, _) => {
+            let mut tys = substs.field_tys(def_id, ccx.tcx());
+            tys.next().and_then(|first_ty| tys.next().and_then(|second_ty| {
+                if tys.next().is_some() {
+                    None
+                } else {
+                    Some([first_ty, second_ty])
+                }
+            }))
+        }
+        ty::TyTuple(tys, _) => {
+            if tys.len() != 2 {
+                return None;
+            }
+            Some([tys[0], tys[1]])
+        }
+        _ => None
+    }
+}
+
+/// Returns true if the type is represented as a pair of immediates.
+pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
+                                  -> bool {
+    match *ccx.layout_of(ty) {
+        Layout::FatPointer { .. } => true,
+        Layout::Univariant { ref variant, .. } => {
+            // There must be only 2 fields.
+            if variant.offsets.len() != 2 {
+                return false;
+            }
+
+            match type_pair_fields(ccx, ty) {
+                Some([a, b]) => {
+                    type_is_immediate(ccx, a) && type_is_immediate(ccx, b)
+                }
+                None => false
+            }
+        }
+        _ => false
+    }
+}
+
+/// Identify types which have size zero at runtime.
+pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
+    let layout = ccx.layout_of(ty);
+    !layout.is_unsized() && layout.size(ccx).bytes() == 0
+}
+
 pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
     ty.needs_drop(tcx, ty::ParamEnv::empty(traits::Reveal::All))
 }
@@ -146,13 +245,17 @@ pub fn C_uint(t: Type, i: u64) -> ValueRef {
     }
 }
 
-pub fn C_uint_big(t: Type, u: u128) -> ValueRef {
+pub fn C_big_integral(t: Type, u: u128) -> ValueRef {
     unsafe {
-        let words = [u as u64, (u >> 64) as u64];
+        let words = [u as u64, u.wrapping_shr(64) as u64];
         llvm::LLVMConstIntOfArbitraryPrecision(t.to_ref(), 2, words.as_ptr())
     }
 }
 
+pub fn C_nil(ccx: &CrateContext) -> ValueRef {
+    C_struct(ccx, &[], false)
+}
+
 pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef {
     C_uint(Type::i1(ccx), val as u64)
 }
@@ -170,7 +273,8 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef {
 }
 
 pub fn C_usize(ccx: &CrateContext, i: u64) -> ValueRef {
-    let bit_size = ccx.data_layout().pointer_size.bits();
+    let bit_size = machine::llbitsize_of_real(ccx, ccx.isize_ty());
+
     if bit_size < 64 {
         // make sure it doesn't overflow
         assert!(i < (1<<bit_size));
@@ -213,15 +317,8 @@ pub fn C_cstr(cx: &CrateContext, s: InternedString, null_terminated: bool) -> Va
 // you will be kicked off fast isel. See issue #4352 for an example of this.
 pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
     let len = s.len();
-    let cs = consts::ptrcast(C_cstr(cx, s, false),
-        cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to());
-    C_fat_ptr(cx, cs, C_usize(cx, len as u64))
-}
-
-pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
-    assert_eq!(abi::FAT_PTR_ADDR, 0);
-    assert_eq!(abi::FAT_PTR_EXTRA, 1);
-    C_struct(cx, &[ptr, meta], false)
+    let cs = consts::ptrcast(C_cstr(cx, s, false), Type::i8p(cx));
+    C_named_struct(cx.str_slice_type(), &[cs, C_usize(cx, len as u64)])
 }
 
 pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
@@ -236,6 +333,12 @@ pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) ->
     }
 }
 
+pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
+    unsafe {
+        llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
+    }
+}
+
 pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
     unsafe {
         return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);
@@ -259,14 +362,13 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
     }
 }
 
-pub fn const_get_elt(v: ValueRef, idx: u64) -> ValueRef {
+pub fn const_get_elt(v: ValueRef, us: &[c_uint]) -> ValueRef {
     unsafe {
-        assert_eq!(idx as c_uint as u64, idx);
-        let us = &[idx as c_uint];
         let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
 
-        debug!("const_get_elt(v={:?}, idx={}, r={:?})",
-               Value(v), idx, Value(r));
+        debug!("const_get_elt(v={:?}, us={:?}, r={:?})",
+               Value(v), us, Value(r));
 
         r
     }
@@ -306,6 +408,19 @@ pub fn const_to_opt_u128(v: ValueRef, sign_ext: bool) -> Option<u128> {
     }
 }
 
+pub fn is_undef(val: ValueRef) -> bool {
+    unsafe {
+        llvm::LLVMIsUndef(val) != False
+    }
+}
+
+#[allow(dead_code)] // potentially useful
+pub fn is_null(val: ValueRef) -> bool {
+    unsafe {
+        llvm::LLVMIsNull(val) != False
+    }
+}
+
 pub fn langcall(tcx: TyCtxt,
                 span: Option<Span>,
                 msg: &str,
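
`C_big_integral` above feeds a `u128` to LLVM as two little-endian
64-bit words. A standalone check of that split, and of the fact that
`wrapping_shr(64)` and a plain `>> 64` agree for a 128-bit operand
(runnable sketch):

fn split_u128(u: u128) -> [u64; 2] {
    [u as u64, (u >> 64) as u64] // low word first
}

fn main() {
    let v: u128 = (5u128 << 64) | 7;
    assert_eq!(split_u128(v), [7, 5]);
    assert_eq!(v.wrapping_shr(64), v >> 64); // 64 < 128: no wrap involved
}
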
diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs
index cfca3b57cb9..4ae289cfada 100644
--- a/src/librustc_trans/consts.rs
+++ b/src/librustc_trans/consts.rs
@@ -14,19 +14,19 @@ use llvm::{ValueRef, True};
 use rustc::hir::def_id::DefId;
 use rustc::hir::map as hir_map;
 use rustc::middle::const_val::ConstEvalErr;
-use debuginfo;
+use {debuginfo, machine};
 use base;
 use trans_item::{TransItem, TransItemExt};
 use common::{self, CrateContext, val_ty};
 use declare;
 use monomorphize::Instance;
 use type_::Type;
-use type_of::LayoutLlvmExt;
+use type_of;
 use rustc::ty;
-use rustc::ty::layout::{Align, LayoutOf};
 
 use rustc::hir;
 
+use std::cmp;
 use std::ffi::{CStr, CString};
 use syntax::ast;
 use syntax::attr;
@@ -45,26 +45,26 @@ pub fn bitcast(val: ValueRef, ty: Type) -> ValueRef {
 
 fn set_global_alignment(ccx: &CrateContext,
                         gv: ValueRef,
-                        mut align: Align) {
+                        mut align: machine::llalign) {
     // The target may require greater alignment for globals than the type does.
     // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
     // which can force it to be smaller.  Rust doesn't support this yet.
     if let Some(min) = ccx.sess().target.target.options.min_global_align {
         match ty::layout::Align::from_bits(min, min) {
-            Ok(min) => align = align.max(min),
+            Ok(min) => align = cmp::max(align, min.abi() as machine::llalign),
             Err(err) => {
                 ccx.sess().err(&format!("invalid minimum global alignment: {}", err));
             }
         }
     }
     unsafe {
-        llvm::LLVMSetAlignment(gv, align.abi() as u32);
+        llvm::LLVMSetAlignment(gv, align);
     }
 }
 
 pub fn addr_of_mut(ccx: &CrateContext,
                    cv: ValueRef,
-                   align: Align,
+                   align: machine::llalign,
                    kind: &str)
                     -> ValueRef {
     unsafe {
@@ -82,16 +82,15 @@ pub fn addr_of_mut(ccx: &CrateContext,
 
 pub fn addr_of(ccx: &CrateContext,
                cv: ValueRef,
-               align: Align,
+               align: machine::llalign,
                kind: &str)
                -> ValueRef {
     if let Some(&gv) = ccx.const_globals().borrow().get(&cv) {
         unsafe {
             // Upgrade the alignment in cases where the same constant is used with different
             // alignment requirements
-            let llalign = align.abi() as u32;
-            if llalign > llvm::LLVMGetAlignment(gv) {
-                llvm::LLVMSetAlignment(gv, llalign);
+            if align > llvm::LLVMGetAlignment(gv) {
+                llvm::LLVMSetAlignment(gv, align);
             }
         }
         return gv;
@@ -113,7 +112,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
     let ty = common::instance_ty(ccx.tcx(), &instance);
     let g = if let Some(id) = ccx.tcx().hir.as_local_node_id(def_id) {
 
-        let llty = ccx.layout_of(ty).llvm_type(ccx);
+        let llty = type_of::type_of(ccx, ty);
         let (g, attrs) = match ccx.tcx().hir.get(id) {
             hir_map::NodeItem(&hir::Item {
                 ref attrs, span, node: hir::ItemStatic(..), ..
@@ -158,7 +157,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
                         }
                     };
                     let llty2 = match ty.sty {
-                        ty::TyRawPtr(ref mt) => ccx.layout_of(mt.ty).llvm_type(ccx),
+                        ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty),
                         _ => {
                             ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`");
                         }
@@ -207,7 +206,7 @@ pub fn get_static(ccx: &CrateContext, def_id: DefId) -> ValueRef {
 
         // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
         // FIXME(nagisa): investigate whether it can be changed into define_global
-        let g = declare::declare_global(ccx, &sym, ccx.layout_of(ty).llvm_type(ccx));
+        let g = declare::declare_global(ccx, &sym, type_of::type_of(ccx, ty));
         // Thread-local statics in some other crate need to *always* be linked
         // against in a thread-local fashion, so we need to be sure to apply the
         // thread-local attribute locally if it was present remotely. If we
@@ -267,7 +266,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
 
         let instance = Instance::mono(ccx.tcx(), def_id);
         let ty = common::instance_ty(ccx.tcx(), &instance);
-        let llty = ccx.layout_of(ty).llvm_type(ccx);
+        let llty = type_of::type_of(ccx, ty);
         let g = if val_llty == llty {
             g
         } else {
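
`set_global_alignment` above lets a target's `min_global_align` raise,
but never lower, the alignment derived from the type. A standalone
model of that clamp (illustrative; alignments in bytes):

fn effective_global_align(type_align: u64, target_min: Option<u64>) -> u64 {
    match target_min {
        Some(min) => type_align.max(min), // raise to the target minimum
        None => type_align,
    }
}

fn main() {
    assert_eq!(effective_global_align(4, Some(8)), 8);   // raised
    assert_eq!(effective_global_align(16, Some(8)), 16); // never lowered
    assert_eq!(effective_global_align(4, None), 4);
}
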
diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs
index b2bb605d01b..cb71ef104d3 100644
--- a/src/librustc_trans/context.rs
+++ b/src/librustc_trans/context.rs
@@ -24,14 +24,12 @@ use monomorphize::Instance;
 
 use partitioning::CodegenUnit;
 use type_::Type;
-use type_of::PointeeInfo;
-
 use rustc_data_structures::base_n;
 use rustc::middle::trans::Stats;
 use rustc_data_structures::stable_hasher::StableHashingContextProvider;
 use rustc::session::config::{self, NoDebugInfo};
 use rustc::session::Session;
-use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout};
+use rustc::ty::layout::{LayoutCx, LayoutError, LayoutTyper, TyLayout};
 use rustc::ty::{self, Ty, TyCtxt};
 use rustc::util::nodemap::FxHashMap;
 use rustc_trans_utils;
@@ -101,10 +99,10 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> {
     /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
     used_statics: RefCell<Vec<ValueRef>>,
 
-    lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
-    scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
-    pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
+    lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
     isize_ty: Type,
+    opaque_vec_type: Type,
+    str_slice_type: Type,
 
     dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
 
@@ -379,9 +377,9 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
                 statics_to_rauw: RefCell::new(Vec::new()),
                 used_statics: RefCell::new(Vec::new()),
                 lltypes: RefCell::new(FxHashMap()),
-                scalar_lltypes: RefCell::new(FxHashMap()),
-                pointee_infos: RefCell::new(FxHashMap()),
                 isize_ty: Type::from_ref(ptr::null_mut()),
+                opaque_vec_type: Type::from_ref(ptr::null_mut()),
+                str_slice_type: Type::from_ref(ptr::null_mut()),
                 dbg_cx,
                 eh_personality: Cell::new(None),
                 eh_unwind_resume: Cell::new(None),
@@ -391,19 +389,25 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
                 placeholder: PhantomData,
             };
 
-            let (isize_ty, mut local_ccx) = {
+            let (isize_ty, opaque_vec_type, str_slice_ty, mut local_ccx) = {
                 // Do a little dance to create a dummy CrateContext, so we can
                 // create some things in the LLVM module of this codegen unit
                 let mut local_ccxs = vec![local_ccx];
-                let isize_ty = {
+                let (isize_ty, opaque_vec_type, str_slice_ty) = {
                     let dummy_ccx = LocalCrateContext::dummy_ccx(shared,
                                                                  local_ccxs.as_mut_slice());
-                    Type::isize(&dummy_ccx)
+                    let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice");
+                    str_slice_ty.set_struct_body(&[Type::i8p(&dummy_ccx),
+                                                   Type::isize(&dummy_ccx)],
+                                                 false);
+                    (Type::isize(&dummy_ccx), Type::opaque_vec(&dummy_ccx), str_slice_ty)
                 };
-                (isize_ty, local_ccxs.pop().unwrap())
+                (isize_ty, opaque_vec_type, str_slice_ty, local_ccxs.pop().unwrap())
             };
 
             local_ccx.isize_ty = isize_ty;
+            local_ccx.opaque_vec_type = opaque_vec_type;
+            local_ccx.str_slice_type = str_slice_ty;
 
             local_ccx
         }
@@ -508,19 +512,10 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         &self.local().used_statics
     }
 
-    pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>> {
+    pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
         &self.local().lltypes
     }
 
-    pub fn scalar_lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
-        &self.local().scalar_lltypes
-    }
-
-    pub fn pointee_infos<'a>(&'a self)
-                             -> &'a RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>> {
-        &self.local().pointee_infos
-    }
-
     pub fn stats<'a>(&'a self) -> &'a RefCell<Stats> {
         &self.local().stats
     }
@@ -529,6 +524,10 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         self.local().isize_ty
     }
 
+    pub fn str_slice_type(&self) -> Type {
+        self.local().str_slice_type
+    }
+
     pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
         &self.local().dbg_cx
     }
@@ -648,44 +647,48 @@ impl<'a, 'tcx> ty::layout::HasDataLayout for &'a SharedCrateContext<'a, 'tcx> {
     }
 }
 
-impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a SharedCrateContext<'a, 'tcx> {
-    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
-        self.tcx
-    }
-}
-
 impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CrateContext<'a, 'tcx> {
     fn data_layout(&self) -> &ty::layout::TargetDataLayout {
         &self.shared.tcx.data_layout
     }
 }
 
-impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> {
+impl<'a, 'tcx> LayoutTyper<'tcx> for &'a SharedCrateContext<'a, 'tcx> {
+    type TyLayout = TyLayout<'tcx>;
+
     fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
-        self.shared.tcx
+        self.tcx
     }
-}
-
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> {
-    type TyLayout = TyLayout<'tcx>;
 
     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
-        (self.tcx, ty::ParamEnv::empty(traits::Reveal::All))
+        let param_env = ty::ParamEnv::empty(traits::Reveal::All);
+        LayoutCx::new(self.tcx, param_env)
             .layout_of(ty)
             .unwrap_or_else(|e| match e {
                 LayoutError::SizeOverflow(_) => self.sess().fatal(&e.to_string()),
                 _ => bug!("failed to get layout for `{}`: {}", ty, e)
             })
     }
+
+    fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.tcx().fully_normalize_associated_types_in(&ty)
+    }
 }
 
-impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CrateContext<'a, 'tcx> {
+impl<'a, 'tcx> LayoutTyper<'tcx> for &'a CrateContext<'a, 'tcx> {
     type TyLayout = TyLayout<'tcx>;
 
+    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
+        self.shared.tcx
+    }
 
     fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
         self.shared.layout_of(ty)
     }
+
+    fn normalize_projections(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+        self.shared.normalize_projections(ty)
+    }
 }
 
 /// Declare any llvm intrinsics that you might need
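
The `str_slice` type re-registered above is the named LLVM struct
`{ i8*, isize }` that `&str` lowers to: a data pointer plus a length.
A small Rust-level model of that shape (illustrative; the `StrSlice`
struct is hypothetical):

#[repr(C)]
struct StrSlice {
    data: *const u8, // i8* in LLVM terms
    len: usize,      // isize in LLVM terms
}

fn main() {
    let s = "hello";
    let model = StrSlice { data: s.as_ptr(), len: s.len() };
    assert_eq!(model.len, 5);
    assert_eq!(model.data, s.as_ptr()); // same data pointer as the &str
}
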
diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs
index b2ad538a8ab..a68390eab7f 100644
--- a/src/librustc_trans/debuginfo/metadata.rs
+++ b/src/librustc_trans/debuginfo/metadata.rs
@@ -9,10 +9,11 @@
 // except according to those terms.
 
 use self::RecursiveTypeDescription::*;
+use self::MemberOffset::*;
 use self::MemberDescriptionFactory::*;
 use self::EnumDiscriminantInfo::*;
 
-use super::utils::{debug_context, DIB, span_start,
+use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of,
                    get_namespace_for_item, create_DIArray, is_node_local_to_unit};
 use super::namespace::mangled_name_of_item;
 use super::type_names::compute_debuginfo_type_name;
@@ -29,17 +30,19 @@ use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE};
 use rustc::ty::fold::TypeVisitor;
 use rustc::ty::subst::Substs;
 use rustc::ty::util::TypeIdHasher;
+use rustc::hir;
 use rustc::ich::Fingerprint;
+use {type_of, machine, monomorphize};
 use common::{self, CrateContext};
+use type_::Type;
 use rustc::ty::{self, AdtKind, Ty};
-use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::session::{Session, config};
 use rustc::util::nodemap::FxHashMap;
 use rustc::util::common::path2cstr;
 
 use libc::{c_uint, c_longlong};
 use std::ffi::CString;
-use std::fmt::Write;
 use std::ptr;
 use std::path::Path;
 use syntax::ast;
@@ -181,6 +184,7 @@ enum RecursiveTypeDescription<'tcx> {
         unfinished_type: Ty<'tcx>,
         unique_type_id: UniqueTypeId,
         metadata_stub: DICompositeType,
+        llvm_type: Type,
         member_description_factory: MemberDescriptionFactory<'tcx>,
     },
     FinalMetadata(DICompositeType)
@@ -191,6 +195,7 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>(
     unfinished_type: Ty<'tcx>,
     unique_type_id: UniqueTypeId,
     metadata_stub: DICompositeType,
+    llvm_type: Type,
     member_description_factory: MemberDescriptionFactory<'tcx>)
  -> RecursiveTypeDescription<'tcx> {
 
@@ -203,6 +208,7 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>(
         unfinished_type,
         unique_type_id,
         metadata_stub,
+        llvm_type,
         member_description_factory,
     }
 }
@@ -218,7 +224,9 @@ impl<'tcx> RecursiveTypeDescription<'tcx> {
                 unfinished_type,
                 unique_type_id,
                 metadata_stub,
+                llvm_type,
                 ref member_description_factory,
+                ..
             } => {
                 // Make sure that we have a forward declaration of the type in
                 // the TypeMap so that recursive references are possible. This
@@ -243,6 +251,7 @@ impl<'tcx> RecursiveTypeDescription<'tcx> {
                 // ... and attach them to the stub to complete it.
                 set_members_of_composite_type(cx,
                                               metadata_stub,
+                                              llvm_type,
                                               &member_descriptions[..]);
                 return MetadataCreationResult::new(metadata_stub, true);
             }
@@ -265,21 +274,20 @@ macro_rules! return_if_metadata_created_in_meantime {
 
 fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                 unique_type_id: UniqueTypeId,
-                                array_or_slice_type: Ty<'tcx>,
                                 element_type: Ty<'tcx>,
+                                len: Option<u64>,
                                 span: Span)
                                 -> MetadataCreationResult {
     let element_type_metadata = type_metadata(cx, element_type, span);
 
     return_if_metadata_created_in_meantime!(cx, unique_type_id);
 
-    let (size, align) = cx.size_and_align_of(array_or_slice_type);
+    let element_llvm_type = type_of::type_of(cx, element_type);
+    let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type);
 
-    let upper_bound = match array_or_slice_type.sty {
-        ty::TyArray(_, len) => {
-            len.val.to_const_int().unwrap().to_u64().unwrap() as c_longlong
-        }
-        _ => -1
+    let (array_size_in_bytes, upper_bound) = match len {
+        Some(len) => (element_type_size * len, len as c_longlong),
+        None => (0, -1)
     };
 
     let subrange = unsafe {
@@ -290,8 +298,8 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
     let metadata = unsafe {
         llvm::LLVMRustDIBuilderCreateArrayType(
             DIB(cx),
-            size.bits(),
-            align.abi_bits() as u32,
+            bytes_to_bits(array_size_in_bytes),
+            bytes_to_bits(element_type_align),
             element_type_metadata,
             subscripts)
     };
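
A worked sketch of the (array_size_in_bytes, upper_bound) computation above, outside the compiler: fixed-length arrays report their byte size and element count, while slices (len == None) report size 0 and the conventional "unknown bound" marker -1.

fn array_size_and_bound(element_size: u64, len: Option<u64>) -> (u64, i64) {
    match len {
        // [T; len]: total size is element size times length.
        Some(len) => (element_size * len, len as i64),
        // [T] / str: length is only known at runtime.
        None => (0, -1),
    }
}

fn main() {
    assert_eq!(array_size_and_bound(4, Some(4)), (16, 4)); // e.g. [u32; 4]
    assert_eq!(array_size_and_bound(4, None), (0, -1));    // e.g. [u32]
}
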
@@ -300,52 +308,66 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 }
 
 fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                slice_ptr_type: Ty<'tcx>,
+                                vec_type: Ty<'tcx>,
                                 element_type: Ty<'tcx>,
                                 unique_type_id: UniqueTypeId,
                                 span: Span)
                                 -> MetadataCreationResult {
-    let data_ptr_type = cx.tcx().mk_imm_ptr(element_type);
+    let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut {
+        ty: element_type,
+        mutbl: hir::MutImmutable
+    });
 
-    let data_ptr_metadata = type_metadata(cx, data_ptr_type, span);
+    let element_type_metadata = type_metadata(cx, data_ptr_type, span);
 
     return_if_metadata_created_in_meantime!(cx, unique_type_id);
 
-    let slice_type_name = compute_debuginfo_type_name(cx, slice_ptr_type, true);
-
-    let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type);
-    let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx().types.usize);
+    let slice_llvm_type = type_of::type_of(cx, vec_type);
+    let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true);
 
+    let member_llvm_types = slice_llvm_type.field_types();
+    assert!(slice_layout_is_correct(cx,
+                                    &member_llvm_types[..],
+                                    element_type));
     let member_descriptions = [
         MemberDescription {
             name: "data_ptr".to_string(),
-            type_metadata: data_ptr_metadata,
-            offset: Size::from_bytes(0),
-            size: pointer_size,
-            align: pointer_align,
+            llvm_type: member_llvm_types[0],
+            type_metadata: element_type_metadata,
+            offset: ComputedMemberOffset,
             flags: DIFlags::FlagZero,
         },
         MemberDescription {
             name: "length".to_string(),
+            llvm_type: member_llvm_types[1],
             type_metadata: type_metadata(cx, cx.tcx().types.usize, span),
-            offset: pointer_size,
-            size: usize_size,
-            align: usize_align,
+            offset: ComputedMemberOffset,
             flags: DIFlags::FlagZero,
         },
     ];
 
+    assert!(member_descriptions.len() == member_llvm_types.len());
+
     let file_metadata = unknown_file_metadata(cx);
 
     let metadata = composite_type_metadata(cx,
-                                           slice_ptr_type,
+                                           slice_llvm_type,
                                            &slice_type_name[..],
                                            unique_type_id,
                                            &member_descriptions,
                                            NO_SCOPE_METADATA,
                                            file_metadata,
                                            span);
-    MetadataCreationResult::new(metadata, false)
+    return MetadataCreationResult::new(metadata, false);
+
+    fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                         member_llvm_types: &[Type],
+                                         element_type: Ty<'tcx>)
+                                         -> bool {
+        member_llvm_types.len() == 2 &&
+        member_llvm_types[0] == type_of::type_of(cx, element_type).ptr_to() &&
+        member_llvm_types[1] == cx.isize_ty()
+    }
 }
 
 fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
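
slice_layout_is_correct above asserts the two-member shape the descriptions rely on: a data pointer followed by a pointer-sized length. An illustrative #[repr(C)] mirror of that shape (RawSlice is a made-up name for illustration, not a rustc type):

#[repr(C)]
struct RawSlice<T> {
    data_ptr: *const T, // member 0: "data_ptr" in the debuginfo
    length: usize,      // member 1: "length"
}

fn main() {
    // Both members are one word, so the fat pointer is two words total.
    assert_eq!(std::mem::size_of::<RawSlice<u8>>(),
               2 * std::mem::size_of::<usize>());
}
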
@@ -414,38 +436,38 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
     let trait_type_name =
         compute_debuginfo_type_name(cx, trait_object_type, false);
 
+    let trait_llvm_type = type_of::type_of(cx, trait_object_type);
     let file_metadata = unknown_file_metadata(cx);
 
-    let layout = cx.layout_of(cx.tcx().mk_mut_ptr(trait_type));
+
+    let ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut {
+        ty: cx.tcx().types.u8,
+        mutbl: hir::MutImmutable
+    });
+    let ptr_type_metadata = type_metadata(cx, ptr_type, syntax_pos::DUMMY_SP);
+    let llvm_type = type_of::type_of(cx, ptr_type);
 
     assert_eq!(abi::FAT_PTR_ADDR, 0);
     assert_eq!(abi::FAT_PTR_EXTRA, 1);
-
-    let data_ptr_field = layout.field(cx, 0);
-    let vtable_field = layout.field(cx, 1);
     let member_descriptions = [
         MemberDescription {
             name: "pointer".to_string(),
-            type_metadata: type_metadata(cx,
-                cx.tcx().mk_mut_ptr(cx.tcx().types.u8),
-                syntax_pos::DUMMY_SP),
-            offset: layout.fields.offset(0),
-            size: data_ptr_field.size,
-            align: data_ptr_field.align,
+            llvm_type: llvm_type,
+            type_metadata: ptr_type_metadata,
+            offset: ComputedMemberOffset,
             flags: DIFlags::FlagArtificial,
         },
         MemberDescription {
             name: "vtable".to_string(),
-            type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP),
-            offset: layout.fields.offset(1),
-            size: vtable_field.size,
-            align: vtable_field.align,
+            llvm_type: llvm_type,
+            type_metadata: ptr_type_metadata,
+            offset: ComputedMemberOffset,
             flags: DIFlags::FlagArtificial,
         },
     ];
 
     composite_type_metadata(cx,
-                            trait_object_type,
+                            trait_llvm_type,
                             &trait_type_name[..],
                             unique_type_id,
                             &member_descriptions,
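
The asserts on abi::FAT_PTR_ADDR and abi::FAT_PTR_EXTRA pin down the member order used above: data pointer first, vtable pointer second, with both described as immutable u8 pointers. An illustrative mirror (TraitObjectRaw is a hypothetical name):

#[repr(C)]
struct TraitObjectRaw {
    pointer: *const u8, // FAT_PTR_ADDR == 0: the data pointer
    vtable: *const u8,  // FAT_PTR_EXTRA == 1: the vtable pointer
}

fn main() {
    // Same two-word shape as a slice; only the second word's meaning differs.
    assert_eq!(std::mem::size_of::<TraitObjectRaw>(),
               2 * std::mem::size_of::<usize>());
}
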
@@ -534,12 +556,15 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         ty::TyTuple(ref elements, _) if elements.is_empty() => {
             MetadataCreationResult::new(basic_type_metadata(cx, t), false)
         }
-        ty::TyArray(typ, _) |
+        ty::TyArray(typ, len) => {
+            let len = len.val.to_const_int().unwrap().to_u64().unwrap();
+            fixed_vec_metadata(cx, unique_type_id, typ, Some(len), usage_site_span)
+        }
         ty::TySlice(typ) => {
-            fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span)
+            fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span)
         }
         ty::TyStr => {
-            fixed_vec_metadata(cx, unique_type_id, t, cx.tcx().types.i8, usage_site_span)
+            fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span)
         }
         ty::TyDynamic(..) => {
             MetadataCreationResult::new(
@@ -745,14 +770,15 @@ fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         _ => bug!("debuginfo::basic_type_metadata - t is invalid type")
     };
 
-    let (size, align) = cx.size_and_align_of(t);
+    let llvm_type = type_of::type_of(cx, t);
+    let (size, align) = size_and_align_of(cx, llvm_type);
     let name = CString::new(name).unwrap();
     let ty_metadata = unsafe {
         llvm::LLVMRustDIBuilderCreateBasicType(
             DIB(cx),
             name.as_ptr(),
-            size.bits(),
-            align.abi_bits() as u32,
+            bytes_to_bits(size),
+            bytes_to_bits(align),
             encoding)
     };
 
@@ -764,25 +790,29 @@ fn foreign_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                    unique_type_id: UniqueTypeId) -> DIType {
     debug!("foreign_type_metadata: {:?}", t);
 
+    let llvm_type = type_of::type_of(cx, t);
+
     let name = compute_debuginfo_type_name(cx, t, false);
-    create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA)
+    create_struct_stub(cx, llvm_type, &name, unique_type_id, NO_SCOPE_METADATA)
 }
 
 fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                    pointer_type: Ty<'tcx>,
                                    pointee_type_metadata: DIType)
                                    -> DIType {
-    let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type);
+    let pointer_llvm_type = type_of::type_of(cx, pointer_type);
+    let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type);
     let name = compute_debuginfo_type_name(cx, pointer_type, false);
     let name = CString::new(name).unwrap();
-    unsafe {
+    let ptr_metadata = unsafe {
         llvm::LLVMRustDIBuilderCreatePointerType(
             DIB(cx),
             pointee_type_metadata,
-            pointer_size.bits(),
-            pointer_align.abi_bits() as u32,
+            bytes_to_bits(pointer_size),
+            bytes_to_bits(pointer_align),
             name.as_ptr())
-    }
+    };
+    return ptr_metadata;
 }
 
 pub fn compile_unit_metadata(scc: &SharedCrateContext,
@@ -877,15 +907,21 @@ impl MetadataCreationResult {
     }
 }
 
+#[derive(Debug)]
+enum MemberOffset {
+    FixedMemberOffset { bytes: usize },
+    // For ComputedMemberOffset, the offset is read from the llvm type definition.
+    ComputedMemberOffset
+}
+
 // Description of a type member, which can either be a regular field (as in
 // structs or tuples) or an enum variant.
 #[derive(Debug)]
 struct MemberDescription {
     name: String,
+    llvm_type: Type,
     type_metadata: DIType,
-    offset: Size,
-    size: Size,
-    align: Align,
+    offset: MemberOffset,
     flags: DIFlags,
 }
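
A small sketch of how the two MemberOffset cases are consumed later in this file (see set_members_of_composite_type below): fixed offsets are taken as-is, computed ones are read back from the LLVM struct layout, modeled here as a plain parameter standing in for machine::llelement_offset():

enum MemberOffset {
    FixedMemberOffset { bytes: usize },
    ComputedMemberOffset,
}

fn offset_in_bytes(offset: &MemberOffset, llvm_field_offset: u64) -> u64 {
    match *offset {
        MemberOffset::FixedMemberOffset { bytes } => bytes as u64,
        MemberOffset::ComputedMemberOffset => llvm_field_offset,
    }
}

fn main() {
    let fixed = MemberOffset::FixedMemberOffset { bytes: 16 };
    assert_eq!(offset_in_bytes(&fixed, 999), 16); // LLVM offset ignored
    assert_eq!(offset_in_bytes(&MemberOffset::ComputedMemberOffset, 8), 8);
}
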
 
@@ -932,6 +968,7 @@ impl<'tcx> MemberDescriptionFactory<'tcx> {
 struct StructMemberDescriptionFactory<'tcx> {
     ty: Ty<'tcx>,
     variant: &'tcx ty::VariantDef,
+    substs: &'tcx Substs<'tcx>,
     span: Span,
 }
 
@@ -939,20 +976,35 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
     fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         let layout = cx.layout_of(self.ty);
+
+        let tmp;
+        let offsets = match *layout {
+            layout::Univariant { ref variant, .. } => &variant.offsets,
+            layout::Vector { element, count } => {
+                let element_size = element.size(cx).bytes();
+                tmp = (0..count)
+                    .map(|i| layout::Size::from_bytes(i * element_size))
+                    .collect::<Vec<layout::Size>>();
+                &tmp
+            }
+            _ => bug!("{} is not a struct", self.ty)
+        };
+
         self.variant.fields.iter().enumerate().map(|(i, f)| {
             let name = if self.variant.ctor_kind == CtorKind::Fn {
                 format!("__{}", i)
             } else {
                 f.name.to_string()
             };
-            let field = layout.field(cx, i);
-            let (size, align) = field.size_and_align();
+            let fty = monomorphize::field_ty(cx.tcx(), self.substs, f);
+
+            let offset = FixedMemberOffset { bytes: offsets[i].bytes() as usize };
+
             MemberDescription {
                 name,
-                type_metadata: type_metadata(cx, field.ty, self.span),
-                offset: layout.fields.offset(i),
-                size,
-                align,
+                llvm_type: type_of::in_memory_type_of(cx, fty),
+                type_metadata: type_metadata(cx, fty, self.span),
+                offset,
                 flags: DIFlags::FlagZero,
             }
         }).collect()
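
For the layout::Vector arm above, field offsets are synthesized as multiples of the element size. A standalone check of that arithmetic:

fn vector_offsets(element_size: u64, count: u64) -> Vec<u64> {
    // Lane i starts right after the i preceding lanes.
    (0..count).map(|i| i * element_size).collect()
}

fn main() {
    // e.g. a 4-lane f32 vector: 4-byte elements at 0, 4, 8, 12.
    assert_eq!(vector_offsets(4, 4), vec![0, 4, 8, 12]);
}
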
@@ -966,16 +1018,17 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                      span: Span)
                                      -> RecursiveTypeDescription<'tcx> {
     let struct_name = compute_debuginfo_type_name(cx, struct_type, false);
+    let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type);
 
-    let (struct_def_id, variant) = match struct_type.sty {
-        ty::TyAdt(def, _) => (def.did, def.struct_variant()),
+    let (struct_def_id, variant, substs) = match struct_type.sty {
+        ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs),
         _ => bug!("prepare_struct_metadata on a non-ADT")
     };
 
     let containing_scope = get_namespace_for_item(cx, struct_def_id);
 
     let struct_metadata_stub = create_struct_stub(cx,
-                                                  struct_type,
+                                                  struct_llvm_type,
                                                   &struct_name,
                                                   unique_type_id,
                                                   containing_scope);
@@ -985,9 +1038,11 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         struct_type,
         unique_type_id,
         struct_metadata_stub,
+        struct_llvm_type,
         StructMDF(StructMemberDescriptionFactory {
             ty: struct_type,
             variant,
+            substs,
             span,
         })
     )
@@ -1008,14 +1063,21 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> {
     fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         let layout = cx.layout_of(self.ty);
-        self.component_types.iter().enumerate().map(|(i, &component_type)| {
-            let (size, align) = cx.size_and_align_of(component_type);
+        let offsets = if let layout::Univariant { ref variant, .. } = *layout {
+            &variant.offsets
+        } else {
+            bug!("{} is not a tuple", self.ty);
+        };
+
+        self.component_types
+            .iter()
+            .enumerate()
+            .map(|(i, &component_type)| {
             MemberDescription {
                 name: format!("__{}", i),
+                llvm_type: type_of::type_of(cx, component_type),
                 type_metadata: type_metadata(cx, component_type, self.span),
-                offset: layout.fields.offset(i),
-                size,
-                align,
+                offset: FixedMemberOffset { bytes: offsets[i].bytes() as usize },
                 flags: DIFlags::FlagZero,
             }
         }).collect()
@@ -1029,16 +1091,18 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                     span: Span)
                                     -> RecursiveTypeDescription<'tcx> {
     let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false);
+    let tuple_llvm_type = type_of::type_of(cx, tuple_type);
 
     create_and_register_recursive_type_forward_declaration(
         cx,
         tuple_type,
         unique_type_id,
         create_struct_stub(cx,
-                           tuple_type,
+                           tuple_llvm_type,
                            &tuple_name[..],
                            unique_type_id,
                            NO_SCOPE_METADATA),
+        tuple_llvm_type,
         TupleMDF(TupleMemberDescriptionFactory {
             ty: tuple_type,
             component_types: component_types.to_vec(),
@@ -1052,23 +1116,21 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 //=-----------------------------------------------------------------------------
 
 struct UnionMemberDescriptionFactory<'tcx> {
-    layout: TyLayout<'tcx>,
     variant: &'tcx ty::VariantDef,
+    substs: &'tcx Substs<'tcx>,
     span: Span,
 }
 
 impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
     fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
-        self.variant.fields.iter().enumerate().map(|(i, f)| {
-            let field = self.layout.field(cx, i);
-            let (size, align) = field.size_and_align();
+        self.variant.fields.iter().map(|field| {
+            let fty = monomorphize::field_ty(cx.tcx(), self.substs, field);
             MemberDescription {
-                name: f.name.to_string(),
-                type_metadata: type_metadata(cx, field.ty, self.span),
-                offset: Size::from_bytes(0),
-                size,
-                align,
+                name: field.name.to_string(),
+                llvm_type: type_of::type_of(cx, fty),
+                type_metadata: type_metadata(cx, fty, self.span),
+                offset: FixedMemberOffset { bytes: 0 },
                 flags: DIFlags::FlagZero,
             }
         }).collect()
@@ -1081,16 +1143,17 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                     span: Span)
                                     -> RecursiveTypeDescription<'tcx> {
     let union_name = compute_debuginfo_type_name(cx, union_type, false);
+    let union_llvm_type = type_of::in_memory_type_of(cx, union_type);
 
-    let (union_def_id, variant) = match union_type.sty {
-        ty::TyAdt(def, _) => (def.did, def.struct_variant()),
+    let (union_def_id, variant, substs) = match union_type.sty {
+        ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs),
         _ => bug!("prepare_union_metadata on a non-ADT")
     };
 
     let containing_scope = get_namespace_for_item(cx, union_def_id);
 
     let union_metadata_stub = create_union_stub(cx,
-                                                union_type,
+                                                union_llvm_type,
                                                 &union_name,
                                                 unique_type_id,
                                                 containing_scope);
@@ -1100,9 +1163,10 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         union_type,
         unique_type_id,
         union_metadata_stub,
+        union_llvm_type,
         UnionMDF(UnionMemberDescriptionFactory {
-            layout: cx.layout_of(union_type),
             variant,
+            substs,
             span,
         })
     )
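
Every union member above gets FixedMemberOffset { bytes: 0 } because union fields all overlay the start of the allocation. A tiny illustration:

#[repr(C)]
union Bits {
    f: f32,
    u: u32,
}

fn main() {
    // Both fields overlay the same bytes starting at offset 0.
    let b = Bits { u: 0x3f80_0000 }; // IEEE-754 bit pattern of 1.0f32
    assert_eq!(unsafe { b.f }, 1.0);
}
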
@@ -1119,9 +1183,10 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 // offset of zero bytes).
 struct EnumMemberDescriptionFactory<'tcx> {
     enum_type: Ty<'tcx>,
-    layout: TyLayout<'tcx>,
+    type_rep: &'tcx layout::Layout,
     discriminant_type_metadata: Option<DIType>,
     containing_scope: DIScope,
+    file_metadata: DIFile,
     span: Span,
 }
 
@@ -1129,70 +1194,162 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
     fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         let adt = &self.enum_type.ty_adt_def().unwrap();
-        match self.layout.variants {
-            layout::Variants::Single { .. } if adt.variants.is_empty() => vec![],
-            layout::Variants::Single { index } => {
-                let (variant_type_metadata, member_description_factory) =
-                    describe_enum_variant(cx,
-                                          self.layout,
-                                          &adt.variants[index],
-                                          NoDiscriminant,
-                                          self.containing_scope,
-                                          self.span);
-
-                let member_descriptions =
-                    member_description_factory.create_member_descriptions(cx);
-
-                set_members_of_composite_type(cx,
-                                              variant_type_metadata,
-                                              &member_descriptions[..]);
-                vec![
-                    MemberDescription {
-                        name: "".to_string(),
-                        type_metadata: variant_type_metadata,
-                        offset: Size::from_bytes(0),
-                        size: self.layout.size,
-                        align: self.layout.align,
-                        flags: DIFlags::FlagZero
-                    }
-                ]
-            }
-            layout::Variants::Tagged { ref variants, .. } => {
+        let substs = match self.enum_type.sty {
+            ty::TyAdt(def, ref s) if def.adt_kind() == AdtKind::Enum => s,
+            _ => bug!("{} is not an enum", self.enum_type)
+        };
+        match *self.type_rep {
+            layout::General { ref variants, .. } => {
                 let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
                     .expect(""));
-                (0..variants.len()).map(|i| {
-                    let variant = self.layout.for_variant(cx, i);
-                    let (variant_type_metadata, member_desc_factory) =
+                variants
+                    .iter()
+                    .enumerate()
+                    .map(|(i, struct_def)| {
+                        let (variant_type_metadata,
+                             variant_llvm_type,
+                             member_desc_factory) =
+                            describe_enum_variant(cx,
+                                                  self.enum_type,
+                                                  struct_def,
+                                                  &adt.variants[i],
+                                                  discriminant_info,
+                                                  self.containing_scope,
+                                                  self.span);
+
+                        let member_descriptions = member_desc_factory
+                            .create_member_descriptions(cx);
+
+                        set_members_of_composite_type(cx,
+                                                      variant_type_metadata,
+                                                      variant_llvm_type,
+                                                      &member_descriptions);
+                        MemberDescription {
+                            name: "".to_string(),
+                            llvm_type: variant_llvm_type,
+                            type_metadata: variant_type_metadata,
+                            offset: FixedMemberOffset { bytes: 0 },
+                            flags: DIFlags::FlagZero
+                        }
+                    }).collect()
+            },
+            layout::Univariant{ ref variant, .. } => {
+                assert!(adt.variants.len() <= 1);
+
+                if adt.variants.is_empty() {
+                    vec![]
+                } else {
+                    let (variant_type_metadata,
+                         variant_llvm_type,
+                         member_description_factory) =
                         describe_enum_variant(cx,
+                                              self.enum_type,
                                               variant,
-                                              &adt.variants[i],
-                                              discriminant_info,
+                                              &adt.variants[0],
+                                              NoDiscriminant,
                                               self.containing_scope,
                                               self.span);
 
-                    let member_descriptions = member_desc_factory
-                        .create_member_descriptions(cx);
+                    let member_descriptions =
+                        member_description_factory.create_member_descriptions(cx);
 
                     set_members_of_composite_type(cx,
                                                   variant_type_metadata,
-                                                  &member_descriptions);
+                                                  variant_llvm_type,
+                                                  &member_descriptions[..]);
+                    vec![
+                        MemberDescription {
+                            name: "".to_string(),
+                            llvm_type: variant_llvm_type,
+                            type_metadata: variant_type_metadata,
+                            offset: FixedMemberOffset { bytes: 0 },
+                            flags: DIFlags::FlagZero
+                        }
+                    ]
+                }
+            }
+            layout::RawNullablePointer { nndiscr: non_null_variant_index, .. } => {
+                // As far as debuginfo is concerned, the pointer this enum
+                // represents is still wrapped in a struct. This is to make the
+                // DWARF representation of enums uniform.
+
+                // First create a description of the artificial wrapper struct:
+                let non_null_variant = &adt.variants[non_null_variant_index as usize];
+                let non_null_variant_name = non_null_variant.name.as_str();
+
+                // The llvm type and metadata of the pointer
+                let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0]);
+                let non_null_llvm_type = type_of::type_of(cx, nnty);
+                let non_null_type_metadata = type_metadata(cx, nnty, self.span);
+
+                // The type of the artificial struct wrapping the pointer
+                let artificial_struct_llvm_type = Type::struct_(cx,
+                                                                &[non_null_llvm_type],
+                                                                false);
+
+                // For the metadata of the wrapper struct, we need to create a
+                // MemberDescription of the struct's single field.
+                let sole_struct_member_description = MemberDescription {
+                    name: match non_null_variant.ctor_kind {
+                        CtorKind::Fn => "__0".to_string(),
+                        CtorKind::Fictive => {
+                            non_null_variant.fields[0].name.to_string()
+                        }
+                        CtorKind::Const => bug!()
+                    },
+                    llvm_type: non_null_llvm_type,
+                    type_metadata: non_null_type_metadata,
+                    offset: FixedMemberOffset { bytes: 0 },
+                    flags: DIFlags::FlagZero
+                };
+
+                let unique_type_id = debug_context(cx).type_map
+                                                      .borrow_mut()
+                                                      .get_unique_type_id_of_enum_variant(
+                                                          cx,
+                                                          self.enum_type,
+                                                          &non_null_variant_name);
+
+                // Now we can create the metadata of the artificial struct
+                let artificial_struct_metadata =
+                    composite_type_metadata(cx,
+                                            artificial_struct_llvm_type,
+                                            &non_null_variant_name,
+                                            unique_type_id,
+                                            &[sole_struct_member_description],
+                                            self.containing_scope,
+                                            self.file_metadata,
+                                            syntax_pos::DUMMY_SP);
+
+                // Encode the information about the null variant in the union
+                // member's name.
+                let null_variant_index = (1 - non_null_variant_index) as usize;
+                let null_variant_name = adt.variants[null_variant_index].name;
+                let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
+                                                0,
+                                                null_variant_name);
+
+                // Finally create the (singleton) list of descriptions of union
+                // members.
+                vec![
                     MemberDescription {
-                        name: "".to_string(),
-                        type_metadata: variant_type_metadata,
-                        offset: Size::from_bytes(0),
-                        size: variant.size,
-                        align: variant.align,
+                        name: union_member_name,
+                        llvm_type: artificial_struct_llvm_type,
+                        type_metadata: artificial_struct_metadata,
+                        offset: FixedMemberOffset { bytes: 0 },
                         flags: DIFlags::FlagZero
                     }
-                }).collect()
-            }
-            layout::Variants::NicheFilling { dataful_variant, ref niche_variants, .. } => {
-                let variant = self.layout.for_variant(cx, dataful_variant);
+                ]
+            },
+            layout::StructWrappedNullablePointer { nonnull: ref struct_def,
+                                                   nndiscr,
+                                                   ref discrfield_source, .. } => {
                 // Create a description of the non-null variant
-                let (variant_type_metadata, member_description_factory) =
+                let (variant_type_metadata, variant_llvm_type, member_description_factory) =
                     describe_enum_variant(cx,
-                                          variant,
-                                          &adt.variants[dataful_variant],
+                                          self.enum_type,
+                                          struct_def,
+                                          &adt.variants[nndiscr as usize],
                                           OptimizedDiscriminant,
                                           self.containing_scope,
                                           self.span);
@@ -1202,51 +1359,34 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
 
                 set_members_of_composite_type(cx,
                                               variant_type_metadata,
+                                              variant_llvm_type,
                                               &variant_member_descriptions[..]);
 
                 // Encode the information about the null variant in the union
                 // member's name.
-                let mut name = String::from("RUST$ENCODED$ENUM$");
-                // HACK(eddyb) the debuggers should just handle offset+size
-                // of discriminant instead of us having to recover its path.
-                // Right now it's not even going to work for `niche_start > 0`,
-                // and for multiple niche variants it only supports the first.
-                fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                                name: &mut String,
-                                                layout: TyLayout<'tcx>,
-                                                offset: Size,
-                                                size: Size) {
-                    for i in 0..layout.fields.count() {
-                        let field_offset = layout.fields.offset(i);
-                        if field_offset > offset {
-                            continue;
-                        }
-                        let inner_offset = offset - field_offset;
-                        let field = layout.field(ccx, i);
-                        if inner_offset + size <= field.size {
-                            write!(name, "{}$", i).unwrap();
-                            compute_field_path(ccx, name, field, inner_offset, size);
-                        }
-                    }
-                }
-                compute_field_path(cx, &mut name,
-                                   self.layout,
-                                   self.layout.fields.offset(0),
-                                   self.layout.field(cx, 0).size);
-                name.push_str(&adt.variants[niche_variants.start].name.as_str());
+                let null_variant_index = (1 - nndiscr) as usize;
+                let null_variant_name = adt.variants[null_variant_index].name;
+                let discrfield_source = discrfield_source.iter()
+                                           .skip(1)
+                                           .map(|x| x.to_string())
+                                           .collect::<Vec<_>>().join("$");
+                let union_member_name = format!("RUST$ENCODED$ENUM${}${}",
+                                                discrfield_source,
+                                                null_variant_name);
 
                 // Create the (singleton) list of descriptions of union members.
                 vec![
                     MemberDescription {
-                        name,
+                        name: union_member_name,
+                        llvm_type: variant_llvm_type,
                         type_metadata: variant_type_metadata,
-                        offset: Size::from_bytes(0),
-                        size: variant.size,
-                        align: variant.align,
+                        offset: FixedMemberOffset { bytes: 0 },
                         flags: DIFlags::FlagZero
                     }
                 ]
-            }
+            },
+            layout::CEnum { .. } => span_bug!(self.span,
+                "CEnum layouts are handled directly in prepare_enum_metadata"),
+            ref l @ _ => bug!("Not an enum layout: {:#?}", l)
         }
     }
 }
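
Both nullable-pointer arms above smuggle the optimized-away discriminant into the single union member's name, in the form RUST$ENCODED$ENUM$<discriminant field path>$<null variant name>: the path is the hard-coded 0 in the RawNullablePointer case and the joined discrfield_source path otherwise. A sketch of the format:

fn encoded_enum_name(discr_field_path: &[usize], null_variant: &str) -> String {
    let path = discr_field_path.iter()
        .map(|i| i.to_string())
        .collect::<Vec<_>>()
        .join("$");
    format!("RUST$ENCODED$ENUM${}${}", path, null_variant)
}

fn main() {
    // e.g. Option<&u32>: the pointer is field 0 and None is the null variant.
    assert_eq!(encoded_enum_name(&[0], "None"), "RUST$ENCODED$ENUM$0$None");
}
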
@@ -1254,7 +1394,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
 // Creates MemberDescriptions for the fields of a single enum variant.
 struct VariantMemberDescriptionFactory<'tcx> {
     // Cloned from the layout::Struct describing the variant.
-    offsets: Vec<layout::Size>,
+    offsets: &'tcx [layout::Size],
     args: Vec<(String, Ty<'tcx>)>,
     discriminant_type_metadata: Option<DIType>,
     span: Span,
@@ -1264,16 +1404,14 @@ impl<'tcx> VariantMemberDescriptionFactory<'tcx> {
     fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         self.args.iter().enumerate().map(|(i, &(ref name, ty))| {
-            let (size, align) = cx.size_and_align_of(ty);
             MemberDescription {
                 name: name.to_string(),
+                llvm_type: type_of::type_of(cx, ty),
                 type_metadata: match self.discriminant_type_metadata {
                     Some(metadata) if i == 0 => metadata,
                     _ => type_metadata(cx, ty, self.span)
                 },
-                offset: self.offsets[i],
-                size,
-                align,
+                offset: FixedMemberOffset { bytes: self.offsets[i].bytes() as usize },
                 flags: DIFlags::FlagZero
             }
         }).collect()
@@ -1292,52 +1430,92 @@ enum EnumDiscriminantInfo {
 // descriptions of the fields of the variant. This is a rudimentary version of a
 // full RecursiveTypeDescription.
 fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                   layout: layout::TyLayout<'tcx>,
+                                   enum_type: Ty<'tcx>,
+                                   struct_def: &'tcx layout::Struct,
                                    variant: &'tcx ty::VariantDef,
                                    discriminant_info: EnumDiscriminantInfo,
                                    containing_scope: DIScope,
                                    span: Span)
-                                   -> (DICompositeType, MemberDescriptionFactory<'tcx>) {
+                                   -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) {
+    let substs = match enum_type.sty {
+        ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s,
+        ref t @ _ => bug!("{:#?} is not an enum", t)
+    };
+
+    let maybe_discr_and_signed: Option<(layout::Integer, bool)> = match *cx.layout_of(enum_type) {
+        layout::CEnum {discr, ..} => Some((discr, true)),
+        layout::General{discr, ..} => Some((discr, false)),
+        layout::Univariant { .. }
+        | layout::RawNullablePointer { .. }
+        | layout::StructWrappedNullablePointer { .. } => None,
+        ref l @ _ => bug!("This should be unreachable. Type is {:#?} layout is {:#?}", enum_type, l)
+    };
+
+    let mut field_tys = variant.fields.iter().map(|f| {
+        monomorphize::field_ty(cx.tcx(), &substs, f)
+    }).collect::<Vec<_>>();
+
+    if let Some((discr, signed)) = maybe_discr_and_signed {
+        field_tys.insert(0, discr.to_ty(&cx.tcx(), signed));
+    }
+
+    let variant_llvm_type =
+        Type::struct_(cx,
+                      &field_tys.iter()
+                                .map(|t| type_of::type_of(cx, t))
+                                .collect::<Vec<_>>(),
+                      struct_def.packed);
+    // Could do some consistency checks here: size, align, field count, discr type
+
     let variant_name = variant.name.as_str();
     let unique_type_id = debug_context(cx).type_map
                                           .borrow_mut()
                                           .get_unique_type_id_of_enum_variant(
                                               cx,
-                                              layout.ty,
+                                              enum_type,
                                               &variant_name);
 
     let metadata_stub = create_struct_stub(cx,
-                                           layout.ty,
+                                           variant_llvm_type,
                                            &variant_name,
                                            unique_type_id,
                                            containing_scope);
 
-    // If this is not a univariant enum, there is also the discriminant field.
-    let (discr_offset, discr_arg) = match discriminant_info {
-        RegularDiscriminant(_) => {
-            let enum_layout = cx.layout_of(layout.ty);
-            (Some(enum_layout.fields.offset(0)),
-             Some(("RUST$ENUM$DISR".to_string(), enum_layout.field(cx, 0).ty)))
+    // Get the argument names from the enum variant info
+    let mut arg_names: Vec<_> = match variant.ctor_kind {
+        CtorKind::Const => vec![],
+        CtorKind::Fn => {
+            variant.fields
+                   .iter()
+                   .enumerate()
+                   .map(|(i, _)| format!("__{}", i))
+                   .collect()
+        }
+        CtorKind::Fictive => {
+            variant.fields
+                   .iter()
+                   .map(|f| f.name.to_string())
+                   .collect()
         }
-        _ => (None, None),
     };
-    let offsets = discr_offset.into_iter().chain((0..layout.fields.count()).map(|i| {
-        layout.fields.offset(i)
-    })).collect();
+
+    // If this is not a univariant enum, there is also the discriminant field.
+    match discriminant_info {
+        RegularDiscriminant(_) => arg_names.insert(0, "RUST$ENUM$DISR".to_string()),
+        _ => { /* do nothing */ }
+    };
 
     // Build an array of (field name, field type) pairs to be captured in the factory closure.
-    let args = discr_arg.into_iter().chain((0..layout.fields.count()).map(|i| {
-        let name = if variant.ctor_kind == CtorKind::Fn {
-            format!("__{}", i)
-        } else {
-            variant.fields[i].name.to_string()
-        };
-        (name, layout.field(cx, i).ty)
-    })).collect();
+    let args: Vec<(String, Ty)> = arg_names.iter()
+        .zip(field_tys.iter())
+        .map(|(s, &t)| (s.to_string(), t))
+        .collect();
 
     let member_description_factory =
         VariantMDF(VariantMemberDescriptionFactory {
-            offsets,
+            offsets: &struct_def.offsets[..],
             args,
             discriminant_type_metadata: match discriminant_info {
                 RegularDiscriminant(discriminant_type_metadata) => {
@@ -1348,7 +1526,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
             span,
         });
 
-    (metadata_stub, member_description_factory)
+    (metadata_stub, variant_llvm_type, member_description_factory)
 }
 
 fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
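
In describe_enum_variant above, tuple-like variants get positional __N names, struct-like variants keep their field names, and tagged layouts gain a synthetic leading RUST$ENUM$DISR entry for the discriminant. A standalone sketch of that name construction:

fn variant_arg_names(fields: &[&str], tuple_like: bool, tagged: bool) -> Vec<String> {
    let mut names: Vec<String> = if tuple_like {
        (0..fields.len()).map(|i| format!("__{}", i)).collect()
    } else {
        fields.iter().map(|f| f.to_string()).collect()
    };
    if tagged {
        // The discriminant is prepended as an extra first member.
        names.insert(0, "RUST$ENUM$DISR".to_string());
    }
    names
}

fn main() {
    assert_eq!(variant_arg_names(&["a", "b"], true, true),
               vec!["RUST$ENUM$DISR", "__0", "__1"]);
}
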
@@ -1384,18 +1562,21 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         })
         .collect();
 
-    let discriminant_type_metadata = |discr: layout::Primitive| {
-        let disr_type_key = (enum_def_id, discr);
+    let discriminant_type_metadata = |inttype: layout::Integer, signed: bool| {
+        let disr_type_key = (enum_def_id, inttype);
         let cached_discriminant_type_metadata = debug_context(cx).created_enum_disr_types
                                                                  .borrow()
                                                                  .get(&disr_type_key).cloned();
         match cached_discriminant_type_metadata {
             Some(discriminant_type_metadata) => discriminant_type_metadata,
             None => {
+                let discriminant_llvm_type = Type::from_integer(cx, inttype);
                 let (discriminant_size, discriminant_align) =
-                    (discr.size(cx), discr.align(cx));
+                    size_and_align_of(cx, discriminant_llvm_type);
                 let discriminant_base_type_metadata =
-                    type_metadata(cx, discr.to_ty(cx.tcx()), syntax_pos::DUMMY_SP);
+                    type_metadata(cx,
+                                  inttype.to_ty(&cx.tcx(), signed),
+                                  syntax_pos::DUMMY_SP);
                 let discriminant_name = get_enum_discriminant_name(cx, enum_def_id);
 
                 let name = CString::new(discriminant_name.as_bytes()).unwrap();
@@ -1406,8 +1587,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                         name.as_ptr(),
                         file_metadata,
                         UNKNOWN_LINE_NUMBER,
-                        discriminant_size.bits(),
-                        discriminant_align.abi_bits() as u32,
+                        bytes_to_bits(discriminant_size),
+                        bytes_to_bits(discriminant_align),
                         create_DIArray(DIB(cx), &enumerators_metadata),
                         discriminant_base_type_metadata)
                 };
@@ -1421,22 +1602,21 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         }
     };
 
-    let layout = cx.layout_of(enum_type);
+    let type_rep = cx.layout_of(enum_type);
 
-    let discriminant_type_metadata = match layout.variants {
-        layout::Variants::Single { .. } |
-        layout::Variants::NicheFilling { .. } => None,
-        layout::Variants::Tagged { ref discr, .. } => {
-            Some(discriminant_type_metadata(discr.value))
-        }
+    let discriminant_type_metadata = match *type_rep {
+        layout::CEnum { discr, signed, .. } => {
+            return FinalMetadata(discriminant_type_metadata(discr, signed))
+        },
+        layout::RawNullablePointer { .. }           |
+        layout::StructWrappedNullablePointer { .. } |
+        layout::Univariant { .. }                      => None,
+        layout::General { discr, .. } => Some(discriminant_type_metadata(discr, false)),
+        ref l @ _ => bug!("Not an enum layout: {:#?}", l)
     };
 
-    match (&layout.abi, discriminant_type_metadata) {
-        (&layout::Abi::Scalar(_), Some(discr)) => return FinalMetadata(discr),
-        _ => {}
-    }
-
-    let (enum_type_size, enum_type_align) = layout.size_and_align();
+    let enum_llvm_type = type_of::type_of(cx, enum_type);
+    let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type);
 
     let enum_name = CString::new(enum_name).unwrap();
     let unique_type_id_str = CString::new(
@@ -1449,8 +1629,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         enum_name.as_ptr(),
         file_metadata,
         UNKNOWN_LINE_NUMBER,
-        enum_type_size.bits(),
-        enum_type_align.abi_bits() as u32,
+        bytes_to_bits(enum_type_size),
+        bytes_to_bits(enum_type_align),
         DIFlags::FlagZero,
         ptr::null_mut(),
         0, // RuntimeLang
@@ -1462,11 +1642,13 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         enum_type,
         unique_type_id,
         enum_metadata,
+        enum_llvm_type,
         EnumMDF(EnumMemberDescriptionFactory {
             enum_type,
-            layout,
+            type_rep: type_rep.layout,
             discriminant_type_metadata,
             containing_scope,
+            file_metadata,
             span,
         }),
     );
@@ -1482,27 +1664,28 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 /// results in a LLVM struct.
 ///
 /// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums.
-fn composite_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                     composite_type: Ty<'tcx>,
-                                     composite_type_name: &str,
-                                     composite_type_unique_id: UniqueTypeId,
-                                     member_descriptions: &[MemberDescription],
-                                     containing_scope: DIScope,
-
-                                     // Ignore source location information as long as it
-                                     // can't be reconstructed for non-local crates.
-                                     _file_metadata: DIFile,
-                                     _definition_span: Span)
-                                     -> DICompositeType {
+fn composite_type_metadata(cx: &CrateContext,
+                           composite_llvm_type: Type,
+                           composite_type_name: &str,
+                           composite_type_unique_id: UniqueTypeId,
+                           member_descriptions: &[MemberDescription],
+                           containing_scope: DIScope,
+
+                           // Ignore source location information as long as it
+                           // can't be reconstructed for non-local crates.
+                           _file_metadata: DIFile,
+                           _definition_span: Span)
+                           -> DICompositeType {
     // Create the (empty) struct metadata node ...
     let composite_type_metadata = create_struct_stub(cx,
-                                                     composite_type,
+                                                     composite_llvm_type,
                                                      composite_type_name,
                                                      composite_type_unique_id,
                                                      containing_scope);
     // ... and immediately create and add the member descriptions.
     set_members_of_composite_type(cx,
                                   composite_type_metadata,
+                                  composite_llvm_type,
                                   member_descriptions);
 
     return composite_type_metadata;
@@ -1510,6 +1693,7 @@ fn composite_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 
 fn set_members_of_composite_type(cx: &CrateContext,
                                  composite_type_metadata: DICompositeType,
+                                 composite_llvm_type: Type,
                                  member_descriptions: &[MemberDescription]) {
     // In some rare cases LLVM metadata uniquing would lead to an existing type
     // description being used instead of a new one created in
@@ -1530,7 +1714,14 @@ fn set_members_of_composite_type(cx: &CrateContext,
 
     let member_metadata: Vec<DIDescriptor> = member_descriptions
         .iter()
-        .map(|member_description| {
+        .enumerate()
+        .map(|(i, member_description)| {
+            let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type);
+            let member_offset = match member_description.offset {
+                FixedMemberOffset { bytes } => bytes as u64,
+                ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i)
+            };
+
             let member_name = member_description.name.as_bytes();
             let member_name = CString::new(member_name).unwrap();
             unsafe {
@@ -1540,9 +1731,9 @@ fn set_members_of_composite_type(cx: &CrateContext,
                     member_name.as_ptr(),
                     unknown_file_metadata(cx),
                     UNKNOWN_LINE_NUMBER,
-                    member_description.size.bits(),
-                    member_description.align.abi_bits() as u32,
-                    member_description.offset.bits(),
+                    bytes_to_bits(member_size),
+                    bytes_to_bits(member_align),
+                    bytes_to_bits(member_offset),
                     member_description.flags,
                     member_description.type_metadata)
             }
@@ -1559,13 +1750,13 @@ fn set_members_of_composite_type(cx: &CrateContext,
 // A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do
 // any caching, does not add any fields to the struct. This can be done later
 // with set_members_of_composite_type().
-fn create_struct_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                struct_type: Ty<'tcx>,
-                                struct_type_name: &str,
-                                unique_type_id: UniqueTypeId,
-                                containing_scope: DIScope)
-                                -> DICompositeType {
-    let (struct_size, struct_align) = cx.size_and_align_of(struct_type);
+fn create_struct_stub(cx: &CrateContext,
+                      struct_llvm_type: Type,
+                      struct_type_name: &str,
+                      unique_type_id: UniqueTypeId,
+                      containing_scope: DIScope)
+                   -> DICompositeType {
+    let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type);
 
     let name = CString::new(struct_type_name).unwrap();
     let unique_type_id = CString::new(
@@ -1583,8 +1774,8 @@ fn create_struct_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
             name.as_ptr(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
-            struct_size.bits(),
-            struct_align.abi_bits() as u32,
+            bytes_to_bits(struct_size),
+            bytes_to_bits(struct_align),
             DIFlags::FlagZero,
             ptr::null_mut(),
             empty_array,
@@ -1596,13 +1787,13 @@ fn create_struct_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
     return metadata_stub;
 }
 
-fn create_union_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                               union_type: Ty<'tcx>,
-                               union_type_name: &str,
-                               unique_type_id: UniqueTypeId,
-                               containing_scope: DIScope)
-                               -> DICompositeType {
-    let (union_size, union_align) = cx.size_and_align_of(union_type);
+fn create_union_stub(cx: &CrateContext,
+                     union_llvm_type: Type,
+                     union_type_name: &str,
+                     unique_type_id: UniqueTypeId,
+                     containing_scope: DIScope)
+                   -> DICompositeType {
+    let (union_size, union_align) = size_and_align_of(cx, union_llvm_type);
 
     let name = CString::new(union_type_name).unwrap();
     let unique_type_id = CString::new(
@@ -1620,8 +1811,8 @@ fn create_union_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
             name.as_ptr(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
-            union_size.bits(),
-            union_align.abi_bits() as u32,
+            bytes_to_bits(union_size),
+            bytes_to_bits(union_align),
             DIFlags::FlagZero,
             empty_array,
             0, // RuntimeLang
@@ -1676,7 +1867,7 @@ pub fn create_global_var_metadata(cx: &CrateContext,
                                                     is_local_to_unit,
                                                     global,
                                                     ptr::null_mut(),
-                                                    global_align.abi() as u32,
+                                                    global_align,
         );
     }
 }
@@ -1708,6 +1899,8 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
     }
 
     let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP);
+    let llvm_vtable_type = Type::vtable_ptr(cx).element_type();
+    let (struct_size, struct_align) = size_and_align_of(cx, llvm_vtable_type);
 
     unsafe {
         // LLVMRustDIBuilderCreateStructType() wants an empty array. A null
@@ -1726,8 +1919,8 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
             name.as_ptr(),
             unknown_file_metadata(cx),
             UNKNOWN_LINE_NUMBER,
-            Size::from_bytes(0).bits(),
-            cx.tcx().data_layout.pointer_align.abi_bits() as u32,
+            bytes_to_bits(struct_size),
+            bytes_to_bits(struct_align),
             DIFlags::FlagArtificial,
             ptr::null_mut(),
             empty_array,
diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs
index c0df25202d8..15b299674ee 100644
--- a/src/librustc_trans/debuginfo/mod.rs
+++ b/src/librustc_trans/debuginfo/mod.rs
@@ -43,7 +43,7 @@ use std::ptr;
 use syntax_pos::{self, Span, Pos};
 use syntax::ast;
 use syntax::symbol::Symbol;
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::layout::{self, LayoutTyper};
 
 pub mod gdb;
 mod utils;
@@ -71,7 +71,7 @@ pub struct CrateDebugContext<'tcx> {
     llmod: ModuleRef,
     builder: DIBuilderRef,
     created_files: RefCell<FxHashMap<(Symbol, Symbol), DIFile>>,
-    created_enum_disr_types: RefCell<FxHashMap<(DefId, layout::Primitive), DIType>>,
+    created_enum_disr_types: RefCell<FxHashMap<(DefId, layout::Integer), DIType>>,
 
     type_map: RefCell<TypeMap<'tcx>>,
     namespace_map: RefCell<DefIdMap<DIScope>>,
@@ -335,7 +335,8 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
             signature.extend(inputs.iter().map(|&t| {
                 let t = match t.sty {
                     ty::TyArray(ct, _)
-                        if (ct == cx.tcx().types.u8) || cx.layout_of(ct).is_zst() => {
+                        if (ct == cx.tcx().types.u8) ||
+                           (cx.layout_of(ct).size(cx).bytes() == 0) => {
                         cx.tcx().mk_imm_ptr(ct)
                     }
                     _ => t
@@ -498,7 +499,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     cx.sess().opts.optimize != config::OptLevel::No,
                     DIFlags::FlagZero,
                     argument_index,
-                    align.abi() as u32,
+                    align,
                 )
             };
             source_loc::set_debug_location(bcx,
diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs
index 95427d9b3cd..ad4fdfca726 100644
--- a/src/librustc_trans/debuginfo/utils.rs
+++ b/src/librustc_trans/debuginfo/utils.rs
@@ -18,11 +18,15 @@ use rustc::ty::DefIdTree;
 
 use llvm;
 use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray};
+use machine;
 use common::{CrateContext};
+use type_::Type;
 
 use syntax_pos::{self, Span};
 use syntax::ast;
 
+use std::ops;
+
 pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool
 {
     // The is_local_to_unit flag indicates whether a function is local to the
@@ -49,6 +53,15 @@ pub fn span_start(cx: &CrateContext, span: Span) -> syntax_pos::Loc {
     cx.sess().codemap().lookup_char_pos(span.lo())
 }
 
+pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u32) {
+    (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type))
+}
+
+pub fn bytes_to_bits<T>(bytes: T) -> T
+    where T: ops::Mul<Output=T> + From<u8> {
+    bytes * 8u8.into()
+}
+
 #[inline]
 pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>)
                            -> &'a CrateDebugContext<'tcx> {
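
The two helpers restored in utils.rs above replace the removed `Size`/`Align` methods in the debuginfo code: `size_and_align_of` asks LLVM's target data about a concrete LLVM type, and `bytes_to_bits` converts the result for DIBuilder, which expects sizes and alignments in bits. A minimal standalone sketch of the conversion, outside the compiler and with no rustc types assumed:

    use std::ops;

    // Same shape as the bytes_to_bits helper restored above: multiply by 8,
    // generically over both the u64 size and the u32 alignment.
    fn bytes_to_bits<T>(bytes: T) -> T
        where T: ops::Mul<Output = T> + From<u8>
    {
        bytes * 8u8.into()
    }

    fn main() {
        // e.g. values as returned by llsize_of_alloc / llalign_of_min
        let (size, align): (u64, u32) = (16, 8);
        assert_eq!(bytes_to_bits(size), 128u64); // DIBuilder wants bits
        assert_eq!(bytes_to_bits(align), 64u32);
    }
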
diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs
index 6c7d7700ade..453b98a1d74 100644
--- a/src/librustc_trans/glue.rs
+++ b/src/librustc_trans/glue.rs
@@ -19,7 +19,8 @@ use common::*;
 use llvm::{ValueRef};
 use llvm;
 use meth;
-use rustc::ty::layout::LayoutOf;
+use monomorphize;
+use rustc::ty::layout::LayoutTyper;
 use rustc::ty::{self, Ty};
 use value::Value;
 
@@ -28,28 +29,17 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
     debug!("calculate size of DST: {}; with lost info: {:?}",
            t, Value(info));
     if bcx.ccx.shared().type_is_sized(t) {
-        let (size, align) = bcx.ccx.size_and_align_of(t);
-        debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
+        let size = bcx.ccx.size_of(t);
+        let align = bcx.ccx.align_of(t);
+        debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
                t, Value(info), size, align);
-        let size = C_usize(bcx.ccx, size.bytes());
-        let align = C_usize(bcx.ccx, align.abi());
+        let size = C_usize(bcx.ccx, size);
+        let align = C_usize(bcx.ccx, align as u64);
         return (size, align);
     }
     assert!(!info.is_null());
     match t.sty {
-        ty::TyDynamic(..) => {
-            // load size/align from vtable
-            (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
-        }
-        ty::TySlice(_) | ty::TyStr => {
-            let unit = t.sequence_element_type(bcx.tcx());
-            // The info in this case is the length of the str, so the size is that
-            // times the unit size.
-            let (size, align) = bcx.ccx.size_and_align_of(unit);
-            (bcx.mul(info, C_usize(bcx.ccx, size.bytes())),
-             C_usize(bcx.ccx, align.abi()))
-        }
-        _ => {
+        ty::TyAdt(..) | ty::TyTuple(..) => {
             let ccx = bcx.ccx;
             // First get the size of all statically known fields.
             // Don't use size_of because it also rounds up to alignment, which we
@@ -58,9 +48,15 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
             let layout = ccx.layout_of(t);
             debug!("DST {} layout: {:?}", t, layout);
 
-            let i = layout.fields.count() - 1;
-            let sized_size = layout.fields.offset(i).bytes();
-            let sized_align = layout.align.abi();
+            let (sized_size, sized_align) = match *layout {
+                ty::layout::Layout::Univariant { ref variant, .. } => {
+                    (variant.offsets.last().map_or(0, |o| o.bytes()), variant.align.abi())
+                }
+                _ => {
+                    bug!("size_and_align_of_dst: expected Univariant for `{}`, found {:#?}",
+                         t, layout);
+                }
+            };
             debug!("DST {} statically sized prefix size: {} align: {}",
                    t, sized_size, sized_align);
             let sized_size = C_usize(ccx, sized_size);
@@ -68,7 +64,14 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
 
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
-            let field_ty = layout.field(ccx, i).ty;
+            let field_ty = match t.sty {
+                ty::TyAdt(def, substs) => {
+                    let last_field = def.struct_variant().fields.last().unwrap();
+                    monomorphize::field_ty(bcx.tcx(), substs, last_field)
+                },
+                ty::TyTuple(tys, _) => tys.last().unwrap(),
+                _ => unreachable!(),
+            };
             let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
 
             // FIXME (#26403, #27023): We should be adding padding
@@ -111,5 +114,17 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
 
             (size, align)
         }
+        ty::TyDynamic(..) => {
+            // load size/align from vtable
+            (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
+        }
+        ty::TySlice(_) | ty::TyStr => {
+            let unit = t.sequence_element_type(bcx.tcx());
+            // The info in this case is the length of the str, so the size is that
+            // times the unit size.
+            (bcx.mul(info, C_usize(bcx.ccx, bcx.ccx.size_of(unit))),
+             C_usize(bcx.ccx, bcx.ccx.align_of(unit) as u64))
+        }
+        _ => bug!("Unexpected unsized type, found {}", t)
     }
 }
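
The restored `size_and_align_of_dst` recurses into the last (unsized) field and then combines the statically known prefix with the dynamic tail: the tail starts at the offset of the last field, the alignment is the maximum of the two, and the total size is rounded up to that alignment (the in-tree FIXME notes that padding between the prefix and the tail is not yet accounted for). A sketch of that arithmetic on plain integers, with illustrative names rather than the IR-building calls above:

    // Combine the sized prefix of a DST with its dynamically sized tail,
    // mirroring the TyAdt/TyTuple arm above, but constant-folded.
    fn dst_size_align(sized_size: u64, sized_align: u64,
                      unsized_size: u64, unsized_align: u64) -> (u64, u64) {
        let size = sized_size + unsized_size;         // tail begins at the last field's offset
        let align = sized_align.max(unsized_align);   // max of prefix and tail alignment
        let size = (size + align - 1) & !(align - 1); // round up to the final alignment
        (size, align)
    }

    fn main() {
        // e.g. struct S { a: u32, tail: [u16] } with three tail elements:
        // prefix offset 4 and align 4; tail size 6 and align 2.
        assert_eq!(dst_size_align(4, 4, 6, 2), (12, 4));
    }
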
diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs
index adbb45f893b..2f1a95038ea 100644
--- a/src/librustc_trans/intrinsic.rs
+++ b/src/librustc_trans/intrinsic.rs
@@ -11,19 +11,20 @@
 #![allow(non_upper_case_globals)]
 
 use intrinsics::{self, Intrinsic};
+use libc;
 use llvm;
 use llvm::{ValueRef};
-use abi::{Abi, FnType, PassMode};
+use abi::{Abi, FnType};
+use adt;
 use mir::lvalue::{LvalueRef, Alignment};
-use mir::operand::{OperandRef, OperandValue};
 use base::*;
 use common::*;
 use declare;
 use glue;
+use type_of;
+use machine;
 use type_::Type;
-use type_of::LayoutLlvmExt;
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{HasDataLayout, LayoutOf};
 use rustc::hir;
 use syntax::ast;
 use syntax::symbol::Symbol;
@@ -87,8 +88,8 @@ fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
 /// add them to librustc_trans/trans/context.rs
 pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                       callee_ty: Ty<'tcx>,
-                                      fn_ty: &FnType<'tcx>,
-                                      args: &[OperandRef<'tcx>],
+                                      fn_ty: &FnType,
+                                      llargs: &[ValueRef],
                                       llresult: ValueRef,
                                       span: Span) {
     let ccx = bcx.ccx;
@@ -105,34 +106,27 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
     let ret_ty = sig.output();
     let name = &*tcx.item_name(def_id);
 
-    let llret_ty = ccx.layout_of(ret_ty).llvm_type(ccx);
-    let result = LvalueRef::new_sized(llresult, fn_ty.ret.layout, Alignment::AbiAligned);
+    let llret_ty = type_of::type_of(ccx, ret_ty);
 
     let simple = get_simple_intrinsic(ccx, name);
     let llval = match name {
         _ if simple.is_some() => {
-            bcx.call(simple.unwrap(),
-                     &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
-                     None)
+            bcx.call(simple.unwrap(), &llargs, None)
         }
         "unreachable" => {
             return;
         },
         "likely" => {
             let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
-            bcx.call(expect, &[args[0].immediate(), C_bool(ccx, true)], None)
+            bcx.call(expect, &[llargs[0], C_bool(ccx, true)], None)
         }
         "unlikely" => {
             let expect = ccx.get_intrinsic(&("llvm.expect.i1"));
-            bcx.call(expect, &[args[0].immediate(), C_bool(ccx, false)], None)
+            bcx.call(expect, &[llargs[0], C_bool(ccx, false)], None)
         }
         "try" => {
-            try_intrinsic(bcx, ccx,
-                          args[0].immediate(),
-                          args[1].immediate(),
-                          args[2].immediate(),
-                          llresult);
-            return;
+            try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult);
+            C_nil(ccx)
         }
         "breakpoint" => {
             let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
@@ -140,35 +134,42 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         }
         "size_of" => {
             let tp_ty = substs.type_at(0);
-            C_usize(ccx, ccx.size_of(tp_ty).bytes())
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
         }
         "size_of_val" => {
             let tp_ty = substs.type_at(0);
-            if let OperandValue::Pair(_, meta) = args[0].val {
+            if bcx.ccx.shared().type_is_sized(tp_ty) {
+                let lltp_ty = type_of::type_of(ccx, tp_ty);
+                C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
+            } else if bcx.ccx.shared().type_has_metadata(tp_ty) {
                 let (llsize, _) =
-                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
+                    glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                 llsize
             } else {
-                C_usize(ccx, ccx.size_of(tp_ty).bytes())
+                C_usize(ccx, 0u64)
             }
         }
         "min_align_of" => {
             let tp_ty = substs.type_at(0);
-            C_usize(ccx, ccx.align_of(tp_ty).abi())
+            C_usize(ccx, ccx.align_of(tp_ty) as u64)
         }
         "min_align_of_val" => {
             let tp_ty = substs.type_at(0);
-            if let OperandValue::Pair(_, meta) = args[0].val {
+            if bcx.ccx.shared().type_is_sized(tp_ty) {
+                C_usize(ccx, ccx.align_of(tp_ty) as u64)
+            } else if bcx.ccx.shared().type_has_metadata(tp_ty) {
                 let (_, llalign) =
-                    glue::size_and_align_of_dst(bcx, tp_ty, meta);
+                    glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                 llalign
             } else {
-                C_usize(ccx, ccx.align_of(tp_ty).abi())
+                C_usize(ccx, 1u64)
             }
         }
         "pref_align_of" => {
             let tp_ty = substs.type_at(0);
-            C_usize(ccx, ccx.align_of(tp_ty).pref())
+            let lltp_ty = type_of::type_of(ccx, tp_ty);
+            C_usize(ccx, machine::llalign_of_pref(ccx, lltp_ty) as u64)
         }
         "type_name" => {
             let tp_ty = substs.type_at(0);
@@ -180,18 +181,18 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         }
         "init" => {
             let ty = substs.type_at(0);
-            if !ccx.layout_of(ty).is_zst() {
+            if !type_is_zero_size(ccx, ty) {
                 // Just zero out the stack slot.
                 // If we store a zero constant, LLVM will drown in vreg allocation for large data
                 // structures, and the generated code will be awful. (A telltale sign of this is
                 // large quantities of `mov [byte ptr foo],0` in the generated code.)
                 memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_usize(ccx, 1));
             }
-            return;
+            C_nil(ccx)
         }
         // Effectively no-ops
         "uninit" => {
-            return;
+            C_nil(ccx)
         }
         "needs_drop" => {
             let tp_ty = substs.type_at(0);
@@ -199,75 +200,69 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             C_bool(ccx, bcx.ccx.shared().type_needs_drop(tp_ty))
         }
         "offset" => {
-            let ptr = args[0].immediate();
-            let offset = args[1].immediate();
+            let ptr = llargs[0];
+            let offset = llargs[1];
             bcx.inbounds_gep(ptr, &[offset])
         }
         "arith_offset" => {
-            let ptr = args[0].immediate();
-            let offset = args[1].immediate();
+            let ptr = llargs[0];
+            let offset = llargs[1];
             bcx.gep(ptr, &[offset])
         }
 
         "copy_nonoverlapping" => {
-            copy_intrinsic(bcx, false, false, substs.type_at(0),
-                           args[1].immediate(), args[0].immediate(), args[2].immediate())
+            copy_intrinsic(bcx, false, false, substs.type_at(0), llargs[1], llargs[0], llargs[2])
         }
         "copy" => {
-            copy_intrinsic(bcx, true, false, substs.type_at(0),
-                           args[1].immediate(), args[0].immediate(), args[2].immediate())
+            copy_intrinsic(bcx, true, false, substs.type_at(0), llargs[1], llargs[0], llargs[2])
         }
         "write_bytes" => {
-            memset_intrinsic(bcx, false, substs.type_at(0),
-                             args[0].immediate(), args[1].immediate(), args[2].immediate())
+            memset_intrinsic(bcx, false, substs.type_at(0), llargs[0], llargs[1], llargs[2])
         }
 
         "volatile_copy_nonoverlapping_memory" => {
-            copy_intrinsic(bcx, false, true, substs.type_at(0),
-                           args[0].immediate(), args[1].immediate(), args[2].immediate())
+            copy_intrinsic(bcx, false, true, substs.type_at(0), llargs[0], llargs[1], llargs[2])
         }
         "volatile_copy_memory" => {
-            copy_intrinsic(bcx, true, true, substs.type_at(0),
-                           args[0].immediate(), args[1].immediate(), args[2].immediate())
+            copy_intrinsic(bcx, true, true, substs.type_at(0), llargs[0], llargs[1], llargs[2])
         }
         "volatile_set_memory" => {
-            memset_intrinsic(bcx, true, substs.type_at(0),
-                             args[0].immediate(), args[1].immediate(), args[2].immediate())
+            memset_intrinsic(bcx, true, substs.type_at(0), llargs[0], llargs[1], llargs[2])
         }
         "volatile_load" => {
             let tp_ty = substs.type_at(0);
-            let mut ptr = args[0].immediate();
-            if let PassMode::Cast(ty) = fn_ty.ret.mode {
-                ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to());
+            let mut ptr = llargs[0];
+            if let Some(ty) = fn_ty.ret.cast {
+                ptr = bcx.pointercast(ptr, ty.ptr_to());
             }
             let load = bcx.volatile_load(ptr);
             unsafe {
-                llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty).abi() as u32);
+                llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty));
             }
-            to_immediate(bcx, load, ccx.layout_of(tp_ty))
+            to_immediate(bcx, load, tp_ty)
         },
         "volatile_store" => {
             let tp_ty = substs.type_at(0);
-            let dst = args[0].deref(bcx.ccx);
-            if let OperandValue::Pair(a, b) = args[1].val {
-                bcx.volatile_store(a, dst.project_field(bcx, 0).llval);
-                bcx.volatile_store(b, dst.project_field(bcx, 1).llval);
+            if type_is_fat_ptr(bcx.ccx, tp_ty) {
+                bcx.volatile_store(llargs[1], get_dataptr(bcx, llargs[0]));
+                bcx.volatile_store(llargs[2], get_meta(bcx, llargs[0]));
             } else {
-                let val = if let OperandValue::Ref(ptr, align) = args[1].val {
-                    bcx.load(ptr, align.non_abi())
+                let val = if fn_ty.args[1].is_indirect() {
+                    bcx.load(llargs[1], None)
                 } else {
-                    if dst.layout.is_zst() {
-                        return;
+                    if !type_is_zero_size(ccx, tp_ty) {
+                        from_immediate(bcx, llargs[1])
+                    } else {
+                        C_nil(ccx)
                     }
-                    from_immediate(bcx, args[1].immediate())
                 };
-                let ptr = bcx.pointercast(dst.llval, val_ty(val).ptr_to());
+                let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to());
                 let store = bcx.volatile_store(val, ptr);
                 unsafe {
-                    llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32);
+                    llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty));
                 }
             }
-            return;
+            C_nil(ccx)
         },
         "prefetch_read_data" | "prefetch_write_data" |
         "prefetch_read_instruction" | "prefetch_write_instruction" => {
@@ -279,40 +274,35 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 "prefetch_write_instruction" => (1, 0),
                 _ => bug!()
             };
-            bcx.call(expect, &[
-                args[0].immediate(),
-                C_i32(ccx, rw),
-                args[1].immediate(),
-                C_i32(ccx, cache_type)
-            ], None)
+            bcx.call(expect, &[llargs[0], C_i32(ccx, rw), llargs[1], C_i32(ccx, cache_type)], None)
         },
         "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
         "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" |
         "overflowing_add" | "overflowing_sub" | "overflowing_mul" |
         "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => {
-            let ty = arg_tys[0];
-            match int_type_width_signed(ty, ccx) {
+            let sty = &arg_tys[0].sty;
+            match int_type_width_signed(sty, ccx) {
                 Some((width, signed)) =>
                     match name {
                         "ctlz" | "cttz" => {
                             let y = C_bool(bcx.ccx, false);
                             let llfn = ccx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
-                            bcx.call(llfn, &[args[0].immediate(), y], None)
+                            bcx.call(llfn, &[llargs[0], y], None)
                         }
                         "ctlz_nonzero" | "cttz_nonzero" => {
                             let y = C_bool(bcx.ccx, true);
                             let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
                             let llfn = ccx.get_intrinsic(llvm_name);
-                            bcx.call(llfn, &[args[0].immediate(), y], None)
+                            bcx.call(llfn, &[llargs[0], y], None)
                         }
                         "ctpop" => bcx.call(ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
-                                        &[args[0].immediate()], None),
+                                        &llargs, None),
                         "bswap" => {
                             if width == 8 {
-                                args[0].immediate() // byte swap a u8/i8 is just a no-op
+                                llargs[0] // byte-swapping a u8/i8 is a no-op
                             } else {
                                 bcx.call(ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
-                                        &[args[0].immediate()], None)
+                                        &llargs, None)
                             }
                         }
                         "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
@@ -322,41 +312,35 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                             let llfn = bcx.ccx.get_intrinsic(&intrinsic);
 
                             // Convert `i1` to a `bool`, and write it to the out parameter
-                            let pair = bcx.call(llfn, &[
-                                args[0].immediate(),
-                                args[1].immediate()
-                            ], None);
-                            let val = bcx.extract_value(pair, 0);
-                            let overflow = bcx.zext(bcx.extract_value(pair, 1), Type::bool(ccx));
-
-                            let dest = result.project_field(bcx, 0);
-                            bcx.store(val, dest.llval, dest.alignment.non_abi());
-                            let dest = result.project_field(bcx, 1);
-                            bcx.store(overflow, dest.llval, dest.alignment.non_abi());
-
-                            return;
+                            let val = bcx.call(llfn, &[llargs[0], llargs[1]], None);
+                            let result = bcx.extract_value(val, 0);
+                            let overflow = bcx.zext(bcx.extract_value(val, 1), Type::bool(ccx));
+                            bcx.store(result, bcx.struct_gep(llresult, 0), None);
+                            bcx.store(overflow, bcx.struct_gep(llresult, 1), None);
+
+                            C_nil(bcx.ccx)
                         },
-                        "overflowing_add" => bcx.add(args[0].immediate(), args[1].immediate()),
-                        "overflowing_sub" => bcx.sub(args[0].immediate(), args[1].immediate()),
-                        "overflowing_mul" => bcx.mul(args[0].immediate(), args[1].immediate()),
+                        "overflowing_add" => bcx.add(llargs[0], llargs[1]),
+                        "overflowing_sub" => bcx.sub(llargs[0], llargs[1]),
+                        "overflowing_mul" => bcx.mul(llargs[0], llargs[1]),
                         "unchecked_div" =>
                             if signed {
-                                bcx.sdiv(args[0].immediate(), args[1].immediate())
+                                bcx.sdiv(llargs[0], llargs[1])
                             } else {
-                                bcx.udiv(args[0].immediate(), args[1].immediate())
+                                bcx.udiv(llargs[0], llargs[1])
                             },
                         "unchecked_rem" =>
                             if signed {
-                                bcx.srem(args[0].immediate(), args[1].immediate())
+                                bcx.srem(llargs[0], llargs[1])
                             } else {
-                                bcx.urem(args[0].immediate(), args[1].immediate())
+                                bcx.urem(llargs[0], llargs[1])
                             },
-                        "unchecked_shl" => bcx.shl(args[0].immediate(), args[1].immediate()),
+                        "unchecked_shl" => bcx.shl(llargs[0], llargs[1]),
                         "unchecked_shr" =>
                             if signed {
-                                bcx.ashr(args[0].immediate(), args[1].immediate())
+                                bcx.ashr(llargs[0], llargs[1])
                             } else {
-                                bcx.lshr(args[0].immediate(), args[1].immediate())
+                                bcx.lshr(llargs[0], llargs[1])
                             },
                         _ => bug!(),
                     },
@@ -364,8 +348,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     span_invalid_monomorphization_error(
                         tcx.sess, span,
                         &format!("invalid monomorphization of `{}` intrinsic: \
-                                  expected basic integer type, found `{}`", name, ty));
-                    return;
+                                  expected basic integer type, found `{}`", name, sty));
+                    C_nil(ccx)
                 }
             }
 
@@ -375,11 +359,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             match float_type_width(sty) {
                 Some(_width) =>
                     match name {
-                        "fadd_fast" => bcx.fadd_fast(args[0].immediate(), args[1].immediate()),
-                        "fsub_fast" => bcx.fsub_fast(args[0].immediate(), args[1].immediate()),
-                        "fmul_fast" => bcx.fmul_fast(args[0].immediate(), args[1].immediate()),
-                        "fdiv_fast" => bcx.fdiv_fast(args[0].immediate(), args[1].immediate()),
-                        "frem_fast" => bcx.frem_fast(args[0].immediate(), args[1].immediate()),
+                        "fadd_fast" => bcx.fadd_fast(llargs[0], llargs[1]),
+                        "fsub_fast" => bcx.fsub_fast(llargs[0], llargs[1]),
+                        "fmul_fast" => bcx.fmul_fast(llargs[0], llargs[1]),
+                        "fdiv_fast" => bcx.fdiv_fast(llargs[0], llargs[1]),
+                        "frem_fast" => bcx.frem_fast(llargs[0], llargs[1]),
                         _ => bug!(),
                     },
                 None => {
@@ -387,37 +371,40 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                         tcx.sess, span,
                         &format!("invalid monomorphization of `{}` intrinsic: \
                                   expected basic float type, found `{}`", name, sty));
-                    return;
+                    C_nil(ccx)
                 }
             }
 
         },
 
         "discriminant_value" => {
-            args[0].deref(bcx.ccx).trans_get_discr(bcx, ret_ty)
+            let val_ty = substs.type_at(0);
+            match val_ty.sty {
+                ty::TyAdt(adt, ..) if adt.is_enum() => {
+                    adt::trans_get_discr(bcx, val_ty, llargs[0], Alignment::AbiAligned,
+                                         Some(llret_ty), true)
+                }
+                _ => C_null(llret_ty)
+            }
         }
 
         "align_offset" => {
             // `ptr as usize`
-            let ptr_val = bcx.ptrtoint(args[0].immediate(), bcx.ccx.isize_ty());
+            let ptr_val = bcx.ptrtoint(llargs[0], bcx.ccx.isize_ty());
             // `ptr_val % align`
-            let align = args[1].immediate();
-            let offset = bcx.urem(ptr_val, align);
+            let offset = bcx.urem(ptr_val, llargs[1]);
             let zero = C_null(bcx.ccx.isize_ty());
             // `offset == 0`
             let is_zero = bcx.icmp(llvm::IntPredicate::IntEQ, offset, zero);
             // `if offset == 0 { 0 } else { offset - align }`
-            bcx.select(is_zero, zero, bcx.sub(offset, align))
+            bcx.select(is_zero, zero, bcx.sub(offset, llargs[1]))
         }
         name if name.starts_with("simd_") => {
-            match generic_simd_intrinsic(bcx, name,
-                                         callee_ty,
-                                         args,
-                                         ret_ty, llret_ty,
-                                         span) {
-                Ok(llval) => llval,
-                Err(()) => return
-            }
+            generic_simd_intrinsic(bcx, name,
+                                   callee_ty,
+                                   &llargs,
+                                   ret_ty, llret_ty,
+                                   span)
         }
         // This requires that atomic intrinsics follow a specific naming pattern:
         // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
@@ -451,66 +438,57 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 _ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
             };
 
-            let invalid_monomorphization = |ty| {
+            let invalid_monomorphization = |sty| {
                 span_invalid_monomorphization_error(tcx.sess, span,
                     &format!("invalid monomorphization of `{}` intrinsic: \
-                              expected basic integer type, found `{}`", name, ty));
+                              expected basic integer type, found `{}`", name, sty));
             };
 
             match split[1] {
                 "cxchg" | "cxchgweak" => {
-                    let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
+                    let sty = &substs.type_at(0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
                         let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
-                        let pair = bcx.atomic_cmpxchg(
-                            args[0].immediate(),
-                            args[1].immediate(),
-                            args[2].immediate(),
-                            order,
-                            failorder,
-                            weak);
-                        let val = bcx.extract_value(pair, 0);
-                        let success = bcx.zext(bcx.extract_value(pair, 1), Type::bool(bcx.ccx));
-
-                        let dest = result.project_field(bcx, 0);
-                        bcx.store(val, dest.llval, dest.alignment.non_abi());
-                        let dest = result.project_field(bcx, 1);
-                        bcx.store(success, dest.llval, dest.alignment.non_abi());
-                        return;
+                        let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order,
+                            failorder, weak);
+                        let result = bcx.extract_value(val, 0);
+                        let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx));
+                        bcx.store(result, bcx.struct_gep(llresult, 0), None);
+                        bcx.store(success, bcx.struct_gep(llresult, 1), None);
                     } else {
-                        return invalid_monomorphization(ty);
+                        invalid_monomorphization(sty);
                     }
+                    C_nil(ccx)
                 }
 
                 "load" => {
-                    let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
-                        let align = ccx.align_of(ty);
-                        bcx.atomic_load(args[0].immediate(), order, align)
+                    let sty = &substs.type_at(0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
+                        bcx.atomic_load(llargs[0], order)
                     } else {
-                        return invalid_monomorphization(ty);
+                        invalid_monomorphization(sty);
+                        C_nil(ccx)
                     }
                 }
 
                 "store" => {
-                    let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
-                        let align = ccx.align_of(ty);
-                        bcx.atomic_store(args[1].immediate(), args[0].immediate(), order, align);
-                        return;
+                    let sty = &substs.type_at(0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
+                        bcx.atomic_store(llargs[1], llargs[0], order);
                     } else {
-                        return invalid_monomorphization(ty);
+                        invalid_monomorphization(sty);
                     }
+                    C_nil(ccx)
                 }
 
                 "fence" => {
                     bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread);
-                    return;
+                    C_nil(ccx)
                 }
 
                 "singlethreadfence" => {
                     bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread);
-                    return;
+                    C_nil(ccx)
                 }
 
                 // These are all AtomicRMW ops
@@ -530,11 +508,12 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                         _ => ccx.sess().fatal("unknown atomic operation")
                     };
 
-                    let ty = substs.type_at(0);
-                    if int_type_width_signed(ty, ccx).is_some() {
-                        bcx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order)
+                    let sty = &substs.type_at(0).sty;
+                    if int_type_width_signed(sty, ccx).is_some() {
+                        bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order)
                     } else {
-                        return invalid_monomorphization(ty);
+                        invalid_monomorphization(sty);
+                        C_nil(ccx)
                     }
                 }
             }
@@ -549,11 +528,13 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 assert_eq!(x.len(), 1);
                 x.into_iter().next().unwrap()
             }
-            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type) -> Vec<Type> {
+            fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
+                          any_changes_needed: &mut bool) -> Vec<Type> {
                 use intrinsics::Type::*;
                 match *t {
                     Void => vec![Type::void(ccx)],
-                    Integer(_signed, _width, llvm_width) => {
+                    Integer(_signed, width, llvm_width) => {
+                        *any_changes_needed |= width != llvm_width;
                         vec![Type::ix(ccx, llvm_width as u64)]
                     }
                     Float(x) => {
@@ -564,24 +545,29 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                         }
                     }
                     Pointer(ref t, ref llvm_elem, _const) => {
+                        *any_changes_needed |= llvm_elem.is_some();
+
                         let t = llvm_elem.as_ref().unwrap_or(t);
-                        let elem = one(ty_to_type(ccx, t));
+                        let elem = one(ty_to_type(ccx, t, any_changes_needed));
                         vec![elem.ptr_to()]
                     }
                     Vector(ref t, ref llvm_elem, length) => {
+                        *any_changes_needed |= llvm_elem.is_some();
+
                         let t = llvm_elem.as_ref().unwrap_or(t);
-                        let elem = one(ty_to_type(ccx, t));
+                        let elem = one(ty_to_type(ccx, t, any_changes_needed));
                         vec![Type::vector(&elem, length as u64)]
                     }
                     Aggregate(false, ref contents) => {
                         let elems = contents.iter()
-                                            .map(|t| one(ty_to_type(ccx, t)))
+                                            .map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
                                             .collect::<Vec<_>>();
                         vec![Type::struct_(ccx, &elems, false)]
                     }
                     Aggregate(true, ref contents) => {
+                        *any_changes_needed = true;
                         contents.iter()
-                                .flat_map(|t| ty_to_type(ccx, t))
+                                .flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
                                 .collect()
                     }
                 }
@@ -593,7 +579,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             // cast.
             fn modify_as_needed<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                                           t: &intrinsics::Type,
-                                          arg: &OperandRef<'tcx>)
+                                          arg_type: Ty<'tcx>,
+                                          llarg: ValueRef)
                                           -> Vec<ValueRef>
             {
                 match *t {
@@ -604,44 +591,55 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                         // This assumes the type is "simple", i.e. no
                         // destructors, and the contents are SIMD
                         // etc.
-                        assert!(!bcx.ccx.shared().type_needs_drop(arg.layout.ty));
-                        let (ptr, align) = match arg.val {
-                            OperandValue::Ref(ptr, align) => (ptr, align),
-                            _ => bug!()
-                        };
-                        let arg = LvalueRef::new_sized(ptr, arg.layout, align);
+                        assert!(!bcx.ccx.shared().type_needs_drop(arg_type));
+                        let arg = LvalueRef::new_sized_ty(llarg, arg_type, Alignment::AbiAligned);
                         (0..contents.len()).map(|i| {
-                            arg.project_field(bcx, i).load(bcx).immediate()
+                            let (ptr, align) = arg.trans_field_ptr(bcx, i);
+                            bcx.load(ptr, align.to_align())
                         }).collect()
                     }
                     intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
-                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem));
-                        vec![bcx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
+                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false));
+                        vec![bcx.pointercast(llarg, llvm_elem.ptr_to())]
                     }
                     intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
-                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem));
-                        vec![bcx.bitcast(arg.immediate(), Type::vector(&llvm_elem, length as u64))]
+                        let llvm_elem = one(ty_to_type(bcx.ccx, llvm_elem, &mut false));
+                        vec![bcx.bitcast(llarg, Type::vector(&llvm_elem, length as u64))]
                     }
                     intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                         // the LLVM intrinsic uses a smaller integer
                         // size than the C intrinsic's signature, so
                         // we have to trim it down here.
-                        vec![bcx.trunc(arg.immediate(), Type::ix(bcx.ccx, llvm_width as u64))]
+                        vec![bcx.trunc(llarg, Type::ix(bcx.ccx, llvm_width as u64))]
                     }
-                    _ => vec![arg.immediate()],
+                    _ => vec![llarg],
                 }
             }
 
 
+            let mut any_changes_needed = false;
             let inputs = intr.inputs.iter()
-                                    .flat_map(|t| ty_to_type(ccx, t))
+                                    .flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
                                     .collect::<Vec<_>>();
 
-            let outputs = one(ty_to_type(ccx, &intr.output));
+            let mut out_changes = false;
+            let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
+            // outputting a flattened aggregate is nonsense
+            assert!(!out_changes);
 
-            let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
-                modify_as_needed(bcx, t, arg)
-            }).collect();
+            let llargs = if !any_changes_needed {
+                // no aggregates to flatten, so no change needed
+                llargs.to_vec()
+            } else {
+                // there are some aggregates that need to be flattened
+                // in the LLVM call, so we need to run over the types
+                // again to find them and extract the arguments
+                intr.inputs.iter()
+                           .zip(llargs)
+                           .zip(arg_tys)
+                           .flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
+                           .collect()
+            };
             assert_eq!(inputs.len(), llargs.len());
 
             let val = match intr.definition {
@@ -659,24 +657,25 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     assert!(!flatten);
 
                     for i in 0..elems.len() {
-                        let dest = result.project_field(bcx, i);
-                        let val = bcx.extract_value(val, i as u64);
-                        bcx.store(val, dest.llval, dest.alignment.non_abi());
+                        let val = bcx.extract_value(val, i);
+                        let lval = LvalueRef::new_sized_ty(llresult, ret_ty,
+                                                           Alignment::AbiAligned);
+                        let (dest, align) = lval.trans_field_ptr(bcx, i);
+                        bcx.store(val, dest, align.to_align());
                     }
-                    return;
+                    C_nil(ccx)
                 }
                 _ => val,
             }
         }
     };
 
-    if !fn_ty.ret.is_ignore() {
-        if let PassMode::Cast(ty) = fn_ty.ret.mode {
-            let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to());
+    if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
+        if let Some(ty) = fn_ty.ret.cast {
+            let ptr = bcx.pointercast(llresult, ty.ptr_to());
             bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
         } else {
-            OperandRef::from_immediate_or_packed_pair(bcx, llval, result.layout)
-                .val.store(bcx, result);
+            store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty);
         }
     }
 }
@@ -684,15 +683,16 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                             allow_overlap: bool,
                             volatile: bool,
-                            ty: Ty<'tcx>,
+                            tp_ty: Ty<'tcx>,
                             dst: ValueRef,
                             src: ValueRef,
                             count: ValueRef)
                             -> ValueRef {
     let ccx = bcx.ccx;
-    let (size, align) = ccx.size_and_align_of(ty);
-    let size = C_usize(ccx, size.bytes());
-    let align = C_i32(ccx, align.abi() as i32);
+    let lltp_ty = type_of::type_of(ccx, tp_ty);
+    let align = C_i32(ccx, ccx.align_of(tp_ty) as i32);
+    let size = machine::llsize_of(ccx, lltp_ty);
+    let int_size = machine::llbitsize_of_real(ccx, ccx.isize_ty());
 
     let operation = if allow_overlap {
         "memmove"
@@ -700,8 +700,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         "memcpy"
     };
 
-    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation,
-                       ccx.data_layout().pointer_size.bits());
+    let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
 
     let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx));
     let src_ptr = bcx.pointercast(src, Type::i8p(ccx));
@@ -725,9 +724,9 @@ fn memset_intrinsic<'a, 'tcx>(
     count: ValueRef
 ) -> ValueRef {
     let ccx = bcx.ccx;
-    let (size, align) = ccx.size_and_align_of(ty);
-    let size = C_usize(ccx, size.bytes());
-    let align = C_i32(ccx, align.abi() as i32);
+    let align = C_i32(ccx, ccx.align_of(ty) as i32);
+    let lltp_ty = type_of::type_of(ccx, ty);
+    let size = machine::llsize_of(ccx, lltp_ty);
     let dst = bcx.pointercast(dst, Type::i8p(ccx));
     call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile)
 }
@@ -817,7 +816,7 @@ fn trans_msvc_try<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         //
         // More information can be found in libstd's seh.rs implementation.
         let i64p = Type::i64(ccx).ptr_to();
-        let slot = bcx.alloca(i64p, "slot", ccx.data_layout().pointer_align);
+        let slot = bcx.alloca(i64p, "slot", None);
         bcx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(),
             None);
 
@@ -973,11 +972,11 @@ fn generic_simd_intrinsic<'a, 'tcx>(
     bcx: &Builder<'a, 'tcx>,
     name: &str,
     callee_ty: Ty<'tcx>,
-    args: &[OperandRef<'tcx>],
+    llargs: &[ValueRef],
     ret_ty: Ty<'tcx>,
     llret_ty: Type,
     span: Span
-) -> Result<ValueRef, ()> {
+) -> ValueRef {
     // macros for error handling:
     macro_rules! emit_error {
         ($msg: tt) => {
@@ -995,7 +994,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         ($cond: expr, $($fmt: tt)*) => {
             if !$cond {
                 emit_error!($($fmt)*);
-                return Err(());
+                return C_nil(bcx.ccx)
             }
         }
     }
@@ -1041,12 +1040,12 @@ fn generic_simd_intrinsic<'a, 'tcx>(
                  ret_ty,
                  ret_ty.simd_type(tcx));
 
-        return Ok(compare_simd_types(bcx,
-                                     args[0].immediate(),
-                                     args[1].immediate(),
-                                     in_elem,
-                                     llret_ty,
-                                     cmp_op))
+        return compare_simd_types(bcx,
+                                  llargs[0],
+                                  llargs[1],
+                                  in_elem,
+                                  llret_ty,
+                                  cmp_op)
     }
 
     if name.starts_with("simd_shuffle") {
@@ -1070,12 +1069,12 @@ fn generic_simd_intrinsic<'a, 'tcx>(
 
         let total_len = in_len as u128 * 2;
 
-        let vector = args[2].immediate();
+        let vector = llargs[2];
 
         let indices: Option<Vec<_>> = (0..n)
             .map(|i| {
                 let arg_idx = i;
-                let val = const_get_elt(vector, i as u64);
+                let val = const_get_elt(vector, &[i as libc::c_uint]);
                 match const_to_opt_u128(val, true) {
                     None => {
                         emit_error!("shuffle index #{} is not a constant", arg_idx);
@@ -1092,27 +1091,23 @@ fn generic_simd_intrinsic<'a, 'tcx>(
             .collect();
         let indices = match indices {
             Some(i) => i,
-            None => return Ok(C_null(llret_ty))
+            None => return C_null(llret_ty)
         };
 
-        return Ok(bcx.shuffle_vector(args[0].immediate(),
-                                     args[1].immediate(),
-                                     C_vector(&indices)))
+        return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices))
     }
 
     if name == "simd_insert" {
         require!(in_elem == arg_tys[2],
                  "expected inserted type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, arg_tys[2]);
-        return Ok(bcx.insert_element(args[0].immediate(),
-                                     args[2].immediate(),
-                                     args[1].immediate()))
+        return bcx.insert_element(llargs[0], llargs[2], llargs[1])
     }
     if name == "simd_extract" {
         require!(ret_ty == in_elem,
                  "expected return type `{}` (element of input `{}`), found `{}`",
                  in_elem, in_ty, ret_ty);
-        return Ok(bcx.extract_element(args[0].immediate(), args[1].immediate()))
+        return bcx.extract_element(llargs[0], llargs[1])
     }
 
     if name == "simd_cast" {
@@ -1126,7 +1121,7 @@ fn generic_simd_intrinsic<'a, 'tcx>(
         // casting cares about nominal type, not just structural type
         let out_elem = ret_ty.simd_type(tcx);
 
-        if in_elem == out_elem { return Ok(args[0].immediate()); }
+        if in_elem == out_elem { return llargs[0]; }
 
         enum Style { Float, Int(/* is signed? */ bool), Unsupported }
 
@@ -1147,36 +1142,36 @@ fn generic_simd_intrinsic<'a, 'tcx>(
 
         match (in_style, out_style) {
             (Style::Int(in_is_signed), Style::Int(_)) => {
-                return Ok(match in_width.cmp(&out_width) {
-                    Ordering::Greater => bcx.trunc(args[0].immediate(), llret_ty),
-                    Ordering::Equal => args[0].immediate(),
+                return match in_width.cmp(&out_width) {
+                    Ordering::Greater => bcx.trunc(llargs[0], llret_ty),
+                    Ordering::Equal => llargs[0],
                     Ordering::Less => if in_is_signed {
-                        bcx.sext(args[0].immediate(), llret_ty)
+                        bcx.sext(llargs[0], llret_ty)
                     } else {
-                        bcx.zext(args[0].immediate(), llret_ty)
+                        bcx.zext(llargs[0], llret_ty)
                     }
-                })
+                }
             }
             (Style::Int(in_is_signed), Style::Float) => {
-                return Ok(if in_is_signed {
-                    bcx.sitofp(args[0].immediate(), llret_ty)
+                return if in_is_signed {
+                    bcx.sitofp(llargs[0], llret_ty)
                 } else {
-                    bcx.uitofp(args[0].immediate(), llret_ty)
-                })
+                    bcx.uitofp(llargs[0], llret_ty)
+                }
             }
             (Style::Float, Style::Int(out_is_signed)) => {
-                return Ok(if out_is_signed {
-                    bcx.fptosi(args[0].immediate(), llret_ty)
+                return if out_is_signed {
+                    bcx.fptosi(llargs[0], llret_ty)
                 } else {
-                    bcx.fptoui(args[0].immediate(), llret_ty)
-                })
+                    bcx.fptoui(llargs[0], llret_ty)
+                }
             }
             (Style::Float, Style::Float) => {
-                return Ok(match in_width.cmp(&out_width) {
-                    Ordering::Greater => bcx.fptrunc(args[0].immediate(), llret_ty),
-                    Ordering::Equal => args[0].immediate(),
-                    Ordering::Less => bcx.fpext(args[0].immediate(), llret_ty)
-                })
+                return match in_width.cmp(&out_width) {
+                    Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty),
+                    Ordering::Equal => llargs[0],
+                    Ordering::Less => bcx.fpext(llargs[0], llret_ty)
+                }
             }
             _ => {/* Unsupported. Fallthrough. */}
         }
@@ -1187,18 +1182,21 @@ fn generic_simd_intrinsic<'a, 'tcx>(
     }
     macro_rules! arith {
         ($($name: ident: $($($p: ident),* => $call: ident),*;)*) => {
-            $(if name == stringify!($name) {
-                match in_elem.sty {
-                    $($(ty::$p(_))|* => {
-                        return Ok(bcx.$call(args[0].immediate(), args[1].immediate()))
-                    })*
-                    _ => {},
-                }
-                require!(false,
-                            "unsupported operation on `{}` with element `{}`",
-                            in_ty,
-                            in_elem)
-            })*
+            $(
+                if name == stringify!($name) {
+                    match in_elem.sty {
+                        $(
+                            $(ty::$p(_))|* => {
+                                return bcx.$call(llargs[0], llargs[1])
+                            }
+                            )*
+                        _ => {},
+                    }
+                    require!(false,
+                             "unsupported operation on `{}` with element `{}`",
+                             in_ty,
+                             in_elem)
+                })*
         }
     }
     arith! {
@@ -1216,13 +1214,15 @@ fn generic_simd_intrinsic<'a, 'tcx>(
     span_bug!(span, "unknown SIMD intrinsic");
 }
 
-// Returns the width of an int Ty, and if it's signed or not
+// Returns the width of an int TypeVariant, and whether it is signed
 // Returns None if the type is not an integer
 // FIXME: there are multiple versions of this function; investigate using some of the
 // existing ones.
-fn int_type_width_signed(ty: Ty, ccx: &CrateContext) -> Option<(u64, bool)> {
-    match ty.sty {
-        ty::TyInt(t) => Some((match t {
+fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
+        -> Option<(u64, bool)> {
+    use rustc::ty::{TyInt, TyUint};
+    match *sty {
+        TyInt(t) => Some((match t {
             ast::IntTy::Is => {
                 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                     "16" => 16,
@@ -1237,7 +1237,7 @@ fn int_type_width_signed(ty: Ty, ccx: &CrateContext) -> Option<(u64, bool)> {
             ast::IntTy::I64 => 64,
             ast::IntTy::I128 => 128,
         }, true)),
-        ty::TyUint(t) => Some((match t {
+        TyUint(t) => Some((match t {
             ast::UintTy::Us => {
                 match &ccx.tcx().sess.target.target.target_pointer_width[..] {
                     "16" => 16,
diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs
index 923d93587e9..ae25e7d94bf 100644
--- a/src/librustc_trans/lib.rs
+++ b/src/librustc_trans/lib.rs
@@ -25,8 +25,6 @@
 #![allow(unused_attributes)]
 #![feature(i128_type)]
 #![feature(i128)]
-#![feature(inclusive_range)]
-#![feature(inclusive_range_syntax)]
 #![feature(libc)]
 #![feature(quote)]
 #![feature(rustc_diagnostic_macros)]
@@ -106,6 +104,7 @@ pub mod back {
 }
 
 mod abi;
+mod adt;
 mod allocator;
 mod asm;
 mod assert_module_sources;
@@ -138,6 +137,7 @@ mod declare;
 mod glue;
 mod intrinsic;
 mod llvm_util;
+mod machine;
 mod metadata;
 mod meth;
 mod mir;
@@ -145,6 +145,7 @@ mod partitioning;
 mod symbol_names_test;
 mod time_graph;
 mod trans_item;
+mod tvec;
 mod type_;
 mod type_of;
 mod value;
diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs
new file mode 100644
index 00000000000..bc383abc7e0
--- /dev/null
+++ b/src/librustc_trans/machine.rs
@@ -0,0 +1,79 @@
+// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Information concerning the machine representation of various types.
+
+#![allow(non_camel_case_types)]
+
+use llvm::{self, ValueRef};
+use common::*;
+
+use type_::Type;
+
+pub type llbits = u64;
+pub type llsize = u64;
+pub type llalign = u32;
+
+// ______________________________________________________________________
+// compute sizeof / alignof
+
+/// Returns the number of bytes between successive elements of type T in an
+/// array of T. This is the "ABI" size. It includes any ABI-mandated padding.
+pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize {
+    unsafe {
+        return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref());
+    }
+}
+
+/// Returns the "real" size of the type in bits.
+pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits {
+    unsafe {
+        llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref())
+    }
+}
+
+/// Returns the size of the type as an LLVM constant integer value.
+pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef {
+    // Once upon a time, this called LLVMSizeOf, which does a
+    // getelementptr(1) on a null pointer and casts to an int, in
+    // order to obtain the type size as a value without requiring the
+    // target data layout.  But we have the target data layout, so
+    // there's no need for that contrivance.  The instruction
+    // selection DAG generator would flatten that GEP(1) node into a
+    // constant of the type's alloc size, so let's save it some work.
+    return C_usize(cx, llsize_of_alloc(cx, ty));
+}
+
+/// Returns the preferred alignment of the given type for the current target.
+/// The preferred alignment may be larger than the alignment used when
+/// packing the type into structs. This will be used for things like
+/// allocations inside a stack frame, which LLVM has a free hand in.
+pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign {
+    unsafe {
+        return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref());
+    }
+}
+
+/// Returns the minimum alignment of a type required by the platform.
+/// This is the alignment that will be used for struct fields, arrays,
+/// and similar ABI-mandated things.
+pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign {
+    unsafe {
+        return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref());
+    }
+}
+
+pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 {
+    unsafe {
+        return llvm::LLVMOffsetOfElement(cx.td(),
+                                         struct_ty.to_ref(),
+                                         element as u32);
+    }
+}
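
[editor's note] The helpers reintroduced above distinguish ABI size (padding included) from raw bit-size, and preferred from minimum alignment. As a rough Rust-level analogue — not rustc internals; `std::mem` only exposes the ABI quantities — a minimal runnable sketch:

    use std::mem::{align_of, size_of};

    #[repr(C)]
    struct Padded {
        a: u8,  // 1 byte, followed by 3 bytes of padding
        b: u32, // 4 bytes, 4-byte aligned
    }

    fn main() {
        // Analogue of llsize_of_alloc: bytes between successive array elements.
        assert_eq!(size_of::<Padded>(), 8);
        // Analogue of llalign_of_min: the ABI-mandated alignment.
        assert_eq!(align_of::<Padded>(), 4);
    }
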
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index a7d467f1cc5..e7c5a36838c 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -9,20 +9,19 @@
 // except according to those terms.
 
 use llvm::ValueRef;
-use abi::FnType;
 use callee;
 use common::*;
 use builder::Builder;
 use consts;
+use machine;
 use monomorphize;
 use type_::Type;
 use value::Value;
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::HasDataLayout;
 use debuginfo;
 
 #[derive(Copy, Clone, Debug)]
-pub struct VirtualIndex(u64);
+pub struct VirtualIndex(usize);
 
 pub const DESTRUCTOR: VirtualIndex = VirtualIndex(0);
 pub const SIZE: VirtualIndex = VirtualIndex(1);
@@ -30,18 +29,14 @@ pub const ALIGN: VirtualIndex = VirtualIndex(2);
 
 impl<'a, 'tcx> VirtualIndex {
     pub fn from_index(index: usize) -> Self {
-        VirtualIndex(index as u64 + 3)
+        VirtualIndex(index + 3)
     }
 
-    pub fn get_fn(self, bcx: &Builder<'a, 'tcx>,
-                  llvtable: ValueRef,
-                  fn_ty: &FnType<'tcx>) -> ValueRef {
+    pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
         // Load the data pointer from the object.
         debug!("get_fn({:?}, {:?})", Value(llvtable), self);
 
-        let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to());
-        let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
-        bcx.nonnull_metadata(ptr);
+        let ptr = bcx.load_nonnull(bcx.gepi(llvtable, &[self.0]), None);
         // Vtable loads are invariant
         bcx.set_invariant_load(ptr);
         ptr
@@ -52,7 +47,7 @@ impl<'a, 'tcx> VirtualIndex {
         debug!("get_int({:?}, {:?})", Value(llvtable), self);
 
         let llvtable = bcx.pointercast(llvtable, Type::isize(bcx.ccx).ptr_to());
-        let ptr = bcx.load(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
+        let ptr = bcx.load(bcx.gepi(llvtable, &[self.0]), None);
         // Vtable loads are invariant
         bcx.set_invariant_load(ptr);
         ptr
@@ -82,13 +77,12 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     }
 
     // Not in the cache. Build it.
-    let nullptr = C_null(Type::i8p(ccx));
+    let nullptr = C_null(Type::nil(ccx).ptr_to());
 
-    let (size, align) = ccx.size_and_align_of(ty);
     let mut components: Vec<_> = [
         callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.tcx(), ty)),
-        C_usize(ccx, size.bytes()),
-        C_usize(ccx, align.abi())
+        C_usize(ccx, ccx.size_of(ty)),
+        C_usize(ccx, ccx.align_of(ty) as u64)
     ].iter().cloned().collect();
 
     if let Some(trait_ref) = trait_ref {
@@ -103,7 +97,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     }
 
     let vtable_const = C_struct(ccx, &components, false);
-    let align = ccx.data_layout().pointer_align;
+    let align = machine::llalign_of_pref(ccx, val_ty(vtable_const));
     let vtable = consts::addr_of(ccx, vtable_const, align, "vtable");
 
     debuginfo::create_vtable_metadata(ccx, ty, vtable);
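
[editor's note] The vtable layout implied by `DESTRUCTOR`, `SIZE`, `ALIGN`, and `from_index` is three header slots followed by method pointers. A minimal standalone model of that indexing (hypothetical names, not rustc's types):

    // slot 0: drop-in-place fn, slot 1: size, slot 2: align, slots 3..: methods
    const HEADER_SLOTS: usize = 3;

    fn method_slot(method_index: usize) -> usize {
        // Mirrors VirtualIndex::from_index: methods start after the header.
        method_index + HEADER_SLOTS
    }

    fn main() {
        assert_eq!(method_slot(0), 3); // first trait method
    }
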
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index 223379527c9..73f60ff29a8 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -18,8 +18,7 @@ use rustc::mir::{self, Location, TerminatorKind, Literal};
 use rustc::mir::visit::{Visitor, LvalueContext};
 use rustc::mir::traversal;
 use rustc::ty;
-use rustc::ty::layout::LayoutOf;
-use type_of::LayoutLlvmExt;
+use common;
 use super::MirContext;
 
 pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
@@ -31,15 +30,21 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
     for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
         let ty = mircx.monomorphize(&ty);
         debug!("local {} has type {:?}", index, ty);
-        let layout = mircx.ccx.layout_of(ty);
-        if layout.is_llvm_immediate() {
+        if ty.is_scalar() ||
+            ty.is_box() ||
+            ty.is_region_ptr() ||
+            ty.is_simd() ||
+            common::type_is_zero_size(mircx.ccx, ty)
+        {
             // These sorts of types are immediates that we can store
             // in a ValueRef without an alloca.
-        } else if layout.is_llvm_scalar_pair() {
+            assert!(common::type_is_immediate(mircx.ccx, ty) ||
+                    common::type_is_fat_ptr(mircx.ccx, ty));
+        } else if common::type_is_imm_pair(mircx.ccx, ty) {
             // We allow pairs and uses of any of their 2 fields.
         } else {
             // These sorts of types require an alloca. Note that
-            // is_llvm_immediate() may *still* be true, particularly
+            // type_is_immediate() may *still* be true, particularly
             // for newtypes, but we currently force some types
             // (e.g. structs) into an alloca unconditionally, just so
             // that we don't have to deal with having two pathways
@@ -136,29 +141,18 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
                     context: LvalueContext<'tcx>,
                     location: Location) {
         debug!("visit_lvalue(lvalue={:?}, context={:?})", lvalue, context);
-        let ccx = self.cx.ccx;
 
         if let mir::Lvalue::Projection(ref proj) = *lvalue {
-            // Allow uses of projections that are ZSTs or from scalar fields.
+            // Allow uses of projections of immediate pair fields.
             if let LvalueContext::Consume = context {
-                let base_ty = proj.base.ty(self.cx.mir, ccx.tcx());
-                let base_ty = self.cx.monomorphize(&base_ty);
-
-                // ZSTs don't require any actual memory access.
-                let elem_ty = base_ty.projection_ty(ccx.tcx(), &proj.elem).to_ty(ccx.tcx());
-                let elem_ty = self.cx.monomorphize(&elem_ty);
-                if ccx.layout_of(elem_ty).is_zst() {
-                    return;
-                }
+                if let mir::Lvalue::Local(_) = proj.base {
+                    if let mir::ProjectionElem::Field(..) = proj.elem {
+                        let ty = proj.base.ty(self.cx.mir, self.cx.ccx.tcx());
 
-                if let mir::ProjectionElem::Field(..) = proj.elem {
-                    let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx()));
-                    if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
-                        // Recurse as a `Consume` instead of `Projection`,
-                        // potentially stopping at non-operand projections,
-                        // which would trigger `mark_as_lvalue` on locals.
-                        self.visit_lvalue(&proj.base, LvalueContext::Consume, location);
-                        return;
+                        let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx()));
+                        if common::type_is_imm_pair(self.cx.ccx, ty) {
+                            return;
+                        }
                     }
                 }
             }
@@ -184,9 +178,9 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
             LvalueContext::StorageLive |
             LvalueContext::StorageDead |
             LvalueContext::Validate |
+            LvalueContext::Inspect |
             LvalueContext::Consume => {}
 
-            LvalueContext::Inspect |
             LvalueContext::Store |
             LvalueContext::Borrow { .. } |
             LvalueContext::Projection(..) => {
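
[editor's note] The reverted classification in `lvalue_locals` boils down to three buckets. A simplified sketch, with a hypothetical boolean input standing in for the `common::*` queries used above:

    #[derive(Debug, PartialEq)]
    enum LocalKind {
        Immediate,     // lives in a ValueRef, no alloca
        ImmediatePair, // two fields, each usable directly
        Memory,        // forced into an alloca
    }

    fn classify(scalar_ptr_simd_or_zst: bool, imm_pair: bool) -> LocalKind {
        if scalar_ptr_simd_or_zst {
            LocalKind::Immediate
        } else if imm_pair {
            LocalKind::ImmediatePair
        } else {
            LocalKind::Memory
        }
    }

    fn main() {
        assert_eq!(classify(false, true), LocalKind::ImmediatePair);
    }
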
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index f43eba36a82..bd26c961bb2 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -11,24 +11,28 @@
 use llvm::{self, ValueRef, BasicBlockRef};
 use rustc::middle::lang_items;
 use rustc::middle::const_val::{ConstEvalErr, ConstInt, ErrKind};
-use rustc::ty::{self, TypeFoldable};
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::traits;
 use rustc::mir;
-use abi::{Abi, FnType, ArgType, PassMode};
-use base;
+use abi::{Abi, FnType, ArgType};
+use adt;
+use base::{self, Lifetime};
 use callee;
 use builder::Builder;
 use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef};
 use consts;
+use machine::llalign_of_min;
 use meth;
 use monomorphize;
-use type_of::LayoutLlvmExt;
+use type_of;
 use type_::Type;
 
 use syntax::symbol::Symbol;
 use syntax_pos::Pos;
 
+use std::cmp;
+
 use super::{MirContext, LocalRef};
 use super::constant::Const;
 use super::lvalue::{Alignment, LvalueRef};
@@ -116,11 +120,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             fn_ty: FnType<'tcx>,
             fn_ptr: ValueRef,
             llargs: &[ValueRef],
-            destination: Option<(ReturnDest<'tcx>, mir::BasicBlock)>,
+            destination: Option<(ReturnDest, Ty<'tcx>, mir::BasicBlock)>,
             cleanup: Option<mir::BasicBlock>
         | {
             if let Some(cleanup) = cleanup {
-                let ret_bcx = if let Some((_, target)) = destination {
+                let ret_bcx = if let Some((_, _, target)) = destination {
                     this.blocks[target]
                 } else {
                     this.unreachable_block()
@@ -132,10 +136,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                            cleanup_bundle);
                 fn_ty.apply_attrs_callsite(invokeret);
 
-                if let Some((ret_dest, target)) = destination {
+                if let Some((ret_dest, ret_ty, target)) = destination {
                     let ret_bcx = this.get_builder(target);
                     this.set_debug_loc(&ret_bcx, terminator.source_info);
-                    this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, invokeret);
+                    let op = OperandRef {
+                        val: Immediate(invokeret),
+                        ty: ret_ty,
+                    };
+                    this.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op);
                 }
             } else {
                 let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);
@@ -148,8 +156,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
                 }
 
-                if let Some((ret_dest, target)) = destination {
-                    this.store_return(&bcx, ret_dest, &fn_ty.ret, llret);
+                if let Some((ret_dest, ret_ty, target)) = destination {
+                    let op = OperandRef {
+                        val: Immediate(llret),
+                        ty: ret_ty,
+                    };
+                    this.store_return(&bcx, ret_dest, &fn_ty.ret, op);
                     funclet_br(this, bcx, target);
                 } else {
                     bcx.unreachable();
@@ -163,18 +175,14 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 if let Some(cleanup_pad) = cleanup_pad {
                     bcx.cleanup_ret(cleanup_pad, None);
                 } else {
-                    let slot = self.get_personality_slot(&bcx);
-                    let lp0 = slot.project_field(&bcx, 0).load(&bcx).immediate();
-                    let lp1 = slot.project_field(&bcx, 1).load(&bcx).immediate();
-                    slot.storage_dead(&bcx);
-
+                    let ps = self.get_personality_slot(&bcx);
+                    let lp = bcx.load(ps, None);
+                    Lifetime::End.call(&bcx, ps);
                     if !bcx.sess().target.target.options.custom_unwind_resume {
-                        let mut lp = C_undef(self.landing_pad_type());
-                        lp = bcx.insert_value(lp, lp0, 0);
-                        lp = bcx.insert_value(lp, lp1, 1);
                         bcx.resume(lp);
                     } else {
-                        bcx.call(bcx.ccx.eh_unwind_resume(), &[lp0], cleanup_bundle);
+                        let exc_ptr = bcx.extract_value(lp, 0);
+                        bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], cleanup_bundle);
                         bcx.unreachable();
                     }
                 }
@@ -207,47 +215,45 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             }
 
             mir::TerminatorKind::Return => {
-                let llval = match self.fn_ty.ret.mode {
-                    PassMode::Ignore | PassMode::Indirect(_) => {
-                        bcx.ret_void();
-                        return;
-                    }
-
-                    PassMode::Direct(_) | PassMode::Pair(..) => {
-                        let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
-                        if let Ref(llval, align) = op.val {
-                            bcx.load(llval, align.non_abi())
-                        } else {
-                            op.immediate_or_packed_pair(&bcx)
-                        }
-                    }
+                let ret = self.fn_ty.ret;
+                if ret.is_ignore() || ret.is_indirect() {
+                    bcx.ret_void();
+                    return;
+                }
 
-                    PassMode::Cast(cast_ty) => {
-                        let op = match self.locals[mir::RETURN_POINTER] {
-                            LocalRef::Operand(Some(op)) => op,
-                            LocalRef::Operand(None) => bug!("use of return before def"),
-                            LocalRef::Lvalue(tr_lvalue) => {
-                                OperandRef {
-                                    val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
-                                    layout: tr_lvalue.layout
-                                }
+                let llval = if let Some(cast_ty) = ret.cast {
+                    let op = match self.locals[mir::RETURN_POINTER] {
+                        LocalRef::Operand(Some(op)) => op,
+                        LocalRef::Operand(None) => bug!("use of return before def"),
+                        LocalRef::Lvalue(tr_lvalue) => {
+                            OperandRef {
+                                val: Ref(tr_lvalue.llval, tr_lvalue.alignment),
+                                ty: tr_lvalue.ty.to_ty(bcx.tcx())
                             }
-                        };
-                        let llslot = match op.val {
-                            Immediate(_) | Pair(..) => {
-                                let scratch = LvalueRef::alloca(&bcx, self.fn_ty.ret.layout, "ret");
-                                op.val.store(&bcx, scratch);
-                                scratch.llval
-                            }
-                            Ref(llval, align) => {
-                                assert_eq!(align, Alignment::AbiAligned,
-                                           "return pointer is unaligned!");
-                                llval
-                            }
-                        };
-                        bcx.load(
-                            bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()),
-                            Some(self.fn_ty.ret.layout.align))
+                        }
+                    };
+                    let llslot = match op.val {
+                        Immediate(_) | Pair(..) => {
+                            let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret", None);
+                            self.store_operand(&bcx, llscratch, None, op);
+                            llscratch
+                        }
+                        Ref(llval, align) => {
+                            assert_eq!(align, Alignment::AbiAligned,
+                                       "return pointer is unaligned!");
+                            llval
+                        }
+                    };
+                    let load = bcx.load(
+                        bcx.pointercast(llslot, cast_ty.ptr_to()),
+                        Some(ret.layout.align(bcx.ccx).abi() as u32));
+                    load
+                } else {
+                    let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
+                    if let Ref(llval, align) = op.val {
+                        base::load_ty(&bcx, llval, align, op.ty)
+                    } else {
+                        op.pack_if_pair(&bcx).immediate()
                     }
                 };
                 bcx.ret(llval);
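
[editor's note] The reverted `Return` lowering picks one of three paths: a void return when the value is ignored or returned indirectly, a spill-and-reload through a cast pointer when the ABI demands a cast, and a plain load/pack otherwise. A decision sketch under those assumptions (strings stand in for the emitted IR):

    fn lower_return(ignore_or_indirect: bool, has_cast: bool) -> &'static str {
        if ignore_or_indirect {
            "ret void: the caller supplied an out-pointer or wants nothing"
        } else if has_cast {
            "spill to a scratch alloca, load through the cast pointer, ret"
        } else {
            "load (or pack the pair) and ret the immediate"
        }
    }

    fn main() {
        assert!(lower_return(true, false).starts_with("ret void"));
    }
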
@@ -269,24 +275,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
 
                 let lvalue = self.trans_lvalue(&bcx, location);
-                let mut args: &[_] = &[lvalue.llval, lvalue.llextra];
-                args = &args[..1 + lvalue.has_extra() as usize];
-                let (drop_fn, fn_ty) = match ty.sty {
-                    ty::TyDynamic(..) => {
-                        let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn);
-                        let sig = common::ty_fn_sig(bcx.ccx, fn_ty);
-                        let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
-                        let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]);
-                        args = &args[..1];
-                        (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty)
-                    }
-                    _ => {
-                        (callee::get_fn(bcx.ccx, drop_fn),
-                         FnType::of_instance(bcx.ccx, &drop_fn))
-                    }
+                let fn_ty = FnType::of_instance(bcx.ccx, &drop_fn);
+                let (drop_fn, need_extra) = match ty.sty {
+                    ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra),
+                                          false),
+                    _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra())
                 };
+                let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize];
                 do_call(self, bcx, fn_ty, drop_fn, args,
-                        Some((ReturnDest::Nothing, target)),
+                        Some((ReturnDest::Nothing, tcx.mk_nil(), target)),
                         unwind);
             }
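
[editor's note] The argument-slice trick above selects one or two arguments without branching on the vector construction. In isolation (runnable, with strings in place of ValueRefs):

    fn main() {
        let llval = "data-ptr";
        let llextra = "extra-ptr";
        let need_extra = false; // whether the callee also needs the extra word
        let args = &[llval, llextra][..1 + need_extra as usize];
        assert_eq!(args, ["data-ptr"]);
    }
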
 
@@ -339,9 +336,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 let filename = C_str_slice(bcx.ccx, filename);
                 let line = C_u32(bcx.ccx, loc.line as u32);
                 let col = C_u32(bcx.ccx, loc.col.to_usize() as u32 + 1);
-                let align = tcx.data_layout.aggregate_align
-                    .max(tcx.data_layout.i32_align)
-                    .max(tcx.data_layout.pointer_align);
 
                 // Put together the arguments to the panic entry point.
                 let (lang_item, args, const_err) = match *msg {
@@ -357,6 +351,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                 }));
 
                         let file_line_col = C_struct(bcx.ccx, &[filename, line, col], false);
+                        let align = llalign_of_min(bcx.ccx, common::val_ty(file_line_col));
                         let file_line_col = consts::addr_of(bcx.ccx,
                                                             file_line_col,
                                                             align,
@@ -371,6 +366,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         let msg_file_line_col = C_struct(bcx.ccx,
                                                      &[msg_str, filename, line, col],
                                                      false);
+                        let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col));
                         let msg_file_line_col = consts::addr_of(bcx.ccx,
                                                                 msg_file_line_col,
                                                                 align,
@@ -391,6 +387,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         let msg_file_line_col = C_struct(bcx.ccx,
                                                      &[msg_str, filename, line, col],
                                                      false);
+                        let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col));
                         let msg_file_line_col = consts::addr_of(bcx.ccx,
                                                                 msg_file_line_col,
                                                                 align,
@@ -431,7 +428,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
                 let callee = self.trans_operand(&bcx, func);
 
-                let (instance, mut llfn) = match callee.layout.ty.sty {
+                let (instance, mut llfn) = match callee.ty.sty {
                     ty::TyFnDef(def_id, substs) => {
                         (Some(ty::Instance::resolve(bcx.ccx.tcx(),
                                                     ty::ParamEnv::empty(traits::Reveal::All),
@@ -442,10 +439,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     ty::TyFnPtr(_) => {
                         (None, Some(callee.immediate()))
                     }
-                    _ => bug!("{} is not callable", callee.layout.ty)
+                    _ => bug!("{} is not callable", callee.ty)
                 };
                 let def = instance.map(|i| i.def);
-                let sig = callee.layout.ty.fn_sig(bcx.tcx());
+                let sig = callee.ty.fn_sig(bcx.tcx());
                 let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
                 let abi = sig.abi;
 
@@ -496,51 +493,83 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     ReturnDest::Nothing
                 };
 
+                // Split the rust-call tupled arguments off.
+                let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
+                    let (tup, args) = args.split_last().unwrap();
+                    (args, Some(tup))
+                } else {
+                    (&args[..], None)
+                };
+
+                let is_shuffle = intrinsic.map_or(false, |name| {
+                    name.starts_with("simd_shuffle")
+                });
+                let mut idx = 0;
+                for arg in first_args {
+                    // The indices passed to simd_shuffle* in the
+                    // third argument must be constant. This is
+                    // checked by const-qualification, which also
+                    // promotes any complex rvalues to constants.
+                    if is_shuffle && idx == 2 {
+                        match *arg {
+                            mir::Operand::Consume(_) => {
+                                span_bug!(span, "shuffle indices must be constant");
+                            }
+                            mir::Operand::Constant(ref constant) => {
+                                let val = self.trans_constant(&bcx, constant);
+                                llargs.push(val.llval);
+                                idx += 1;
+                                continue;
+                            }
+                        }
+                    }
+
+                    let mut op = self.trans_operand(&bcx, arg);
+
+                    // The callee needs to own the argument memory if we pass it
+                    // by-ref, so make a local copy of non-immediate constants.
+                    if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) {
+                        let tmp = LvalueRef::alloca(&bcx, op.ty, "const");
+                        self.store_operand(&bcx, tmp.llval, tmp.alignment.to_align(), op);
+                        op.val = Ref(tmp.llval, tmp.alignment);
+                    }
+
+                    self.trans_argument(&bcx, op, &mut llargs, &fn_ty,
+                                        &mut idx, &mut llfn, &def);
+                }
+                if let Some(tup) = untuple {
+                    self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
+                                                  &mut idx, &mut llfn, &def)
+                }
+
                 if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
                     use intrinsic::trans_intrinsic_call;
 
-                    let dest = match ret_dest {
-                        _ if fn_ty.ret.is_indirect() => llargs[0],
+                    let (dest, llargs) = match ret_dest {
+                        _ if fn_ty.ret.is_indirect() => {
+                            (llargs[0], &llargs[1..])
+                        }
                         ReturnDest::Nothing => {
-                            C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to())
+                            (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..])
                         }
                         ReturnDest::IndirectOperand(dst, _) |
-                        ReturnDest::Store(dst) => dst.llval,
+                        ReturnDest::Store(dst) => (dst, &llargs[..]),
                         ReturnDest::DirectOperand(_) =>
                             bug!("Cannot use direct operand with an intrinsic call")
                     };
 
-                    let args: Vec<_> = args.iter().enumerate().map(|(i, arg)| {
-                        // The indices passed to simd_shuffle* in the
-                        // third argument must be constant. This is
-                        // checked by const-qualification, which also
-                        // promotes any complex rvalues to constants.
-                        if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") {
-                            match *arg {
-                                mir::Operand::Consume(_) => {
-                                    span_bug!(span, "shuffle indices must be constant");
-                                }
-                                mir::Operand::Constant(ref constant) => {
-                                    let val = self.trans_constant(&bcx, constant);
-                                    return OperandRef {
-                                        val: Immediate(val.llval),
-                                        layout: bcx.ccx.layout_of(val.ty)
-                                    };
-                                }
-                            }
-                        }
-
-                        self.trans_operand(&bcx, arg)
-                    }).collect();
-
-
                     let callee_ty = common::instance_ty(
                         bcx.ccx.tcx(), instance.as_ref().unwrap());
-                    trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &args, dest,
+                    trans_intrinsic_call(&bcx, callee_ty, &fn_ty, &llargs, dest,
                                          terminator.source_info.span);
 
                     if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
-                        self.store_return(&bcx, ret_dest, &fn_ty.ret, dst.llval);
+                        // Make a fake operand for store_return
+                        let op = OperandRef {
+                            val: Ref(dst, Alignment::AbiAligned),
+                            ty: sig.output(),
+                        };
+                        self.store_return(&bcx, ret_dest, &fn_ty.ret, op);
                     }
 
                     if let Some((_, target)) = *destination {
@@ -552,40 +581,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     return;
                 }
 
-                // Split the rust-call tupled arguments off.
-                let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
-                    let (tup, args) = args.split_last().unwrap();
-                    (args, Some(tup))
-                } else {
-                    (&args[..], None)
-                };
-
-                for (i, arg) in first_args.iter().enumerate() {
-                    let mut op = self.trans_operand(&bcx, arg);
-                    if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
-                        if let Pair(data_ptr, meta) = op.val {
-                            llfn = Some(meth::VirtualIndex::from_index(idx)
-                                .get_fn(&bcx, meta, &fn_ty));
-                            llargs.push(data_ptr);
-                            continue;
-                        }
-                    }
-
-                    // The callee needs to own the argument memory if we pass it
-                    // by-ref, so make a local copy of non-immediate constants.
-                    if let (&mir::Operand::Constant(_), Ref(..)) = (arg, op.val) {
-                        let tmp = LvalueRef::alloca(&bcx, op.layout, "const");
-                        op.val.store(&bcx, tmp);
-                        op.val = Ref(tmp.llval, tmp.alignment);
-                    }
-
-                    self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]);
-                }
-                if let Some(tup) = untuple {
-                    self.trans_arguments_untupled(&bcx, tup, &mut llargs,
-                        &fn_ty.args[first_args.len()..])
-                }
-
                 let fn_ptr = match (llfn, instance) {
                     (Some(llfn), _) => llfn,
                     (None, Some(instance)) => callee::get_fn(bcx.ccx, instance),
@@ -593,7 +588,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 };
 
                 do_call(self, bcx, fn_ty, fn_ptr, &llargs,
-                        destination.as_ref().map(|&(_, target)| (ret_dest, target)),
+                        destination.as_ref().map(|&(_, target)| (ret_dest, sig.output(), target)),
                         cleanup);
             }
             mir::TerminatorKind::GeneratorDrop |
@@ -606,73 +601,79 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                       bcx: &Builder<'a, 'tcx>,
                       op: OperandRef<'tcx>,
                       llargs: &mut Vec<ValueRef>,
-                      arg: &ArgType<'tcx>) {
+                      fn_ty: &FnType<'tcx>,
+                      next_idx: &mut usize,
+                      llfn: &mut Option<ValueRef>,
+                      def: &Option<ty::InstanceDef<'tcx>>) {
+        if let Pair(a, b) = op.val {
+            // Treat the values in a fat pointer separately.
+            if common::type_is_fat_ptr(bcx.ccx, op.ty) {
+                let (ptr, meta) = (a, b);
+                if *next_idx == 0 {
+                    if let Some(ty::InstanceDef::Virtual(_, idx)) = *def {
+                        let llmeth = meth::VirtualIndex::from_index(idx).get_fn(bcx, meta);
+                        let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
+                        *llfn = Some(bcx.pointercast(llmeth, llty));
+                    }
+                }
+
+                let imm_op = |x| OperandRef {
+                    val: Immediate(x),
+                    // We won't be checking the type again.
+                    ty: bcx.tcx().types.err
+                };
+                self.trans_argument(bcx, imm_op(ptr), llargs, fn_ty, next_idx, llfn, def);
+                self.trans_argument(bcx, imm_op(meta), llargs, fn_ty, next_idx, llfn, def);
+                return;
+            }
+        }
+
+        let arg = &fn_ty.args[*next_idx];
+        *next_idx += 1;
+
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
-            llargs.push(C_undef(ty.llvm_type(bcx.ccx)));
+            llargs.push(C_undef(ty));
         }
 
         if arg.is_ignore() {
             return;
         }
 
-        if let PassMode::Pair(..) = arg.mode {
-            match op.val {
-                Pair(a, b) => {
-                    llargs.push(a);
-                    llargs.push(b);
-                    return;
-                }
-                _ => bug!("trans_argument: {:?} invalid for pair argument", op)
-            }
-        }
-
         // Force by-ref if we have to load through a cast pointer.
         let (mut llval, align, by_ref) = match op.val {
             Immediate(_) | Pair(..) => {
-                match arg.mode {
-                    PassMode::Indirect(_) | PassMode::Cast(_) => {
-                        let scratch = LvalueRef::alloca(bcx, arg.layout, "arg");
-                        op.val.store(bcx, scratch);
-                        (scratch.llval, Alignment::AbiAligned, true)
-                    }
-                    _ => {
-                        (op.immediate_or_packed_pair(bcx), Alignment::AbiAligned, false)
-                    }
+                if arg.is_indirect() || arg.cast.is_some() {
+                    let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
+                    self.store_operand(bcx, llscratch, None, op);
+                    (llscratch, Alignment::AbiAligned, true)
+                } else {
+                    (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false)
                 }
             }
-            Ref(llval, align @ Alignment::Packed(_)) if arg.is_indirect() => {
+            Ref(llval, Alignment::Packed) if arg.is_indirect() => {
                 // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
                 // think that at the moment (Rust 1.16) we only pass temporaries, but we shouldn't
                 // leave scary latent bugs lying around.
 
-                let scratch = LvalueRef::alloca(bcx, arg.layout, "arg");
-                base::memcpy_ty(bcx, scratch.llval, llval, op.layout, align.non_abi());
-                (scratch.llval, Alignment::AbiAligned, true)
+                let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None);
+                base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1));
+                (llscratch, Alignment::AbiAligned, true)
             }
             Ref(llval, align) => (llval, align, true)
         };
 
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
-            if let PassMode::Cast(ty) = arg.mode {
-                llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()),
-                                 (align | Alignment::Packed(arg.layout.align))
-                                    .non_abi());
-            } else {
-                // We can't use `LvalueRef::load` here because the argument
-                // may have a type we don't treat as immediate, but the ABI
-                // used for this call is passing it by-value. In that case,
-                // the load would just produce `OperandValue::Ref` instead
-                // of the `OperandValue::Immediate` we need for the call.
-                llval = bcx.load(llval, align.non_abi());
-                if let layout::Abi::Scalar(ref scalar) = arg.layout.abi {
-                    if scalar.is_bool() {
-                        bcx.range_metadata(llval, 0..2);
-                    }
-                }
+            if arg.layout.ty == bcx.tcx().types.bool {
                 // We store bools as i8 so we need to truncate to i1.
-                llval = base::to_immediate(bcx, llval, arg.layout);
+                llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None);
+                llval = bcx.trunc(llval, Type::i1(bcx.ccx));
+            } else if let Some(ty) = arg.cast {
+                llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()),
+                                 align.min_with(arg.layout.align(bcx.ccx).abi() as u32));
+            } else {
+                llval = bcx.load(llval, align.to_align());
             }
         }
 
@@ -683,36 +684,89 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                 bcx: &Builder<'a, 'tcx>,
                                 operand: &mir::Operand<'tcx>,
                                 llargs: &mut Vec<ValueRef>,
-                                args: &[ArgType<'tcx>]) {
+                                fn_ty: &FnType<'tcx>,
+                                next_idx: &mut usize,
+                                llfn: &mut Option<ValueRef>,
+                                def: &Option<ty::InstanceDef<'tcx>>) {
         let tuple = self.trans_operand(bcx, operand);
 
+        let arg_types = match tuple.ty.sty {
+            ty::TyTuple(ref tys, _) => tys,
+            _ => span_bug!(self.mir.span,
+                           "bad final argument to \"rust-call\" fn {:?}", tuple.ty)
+        };
+
         // Handle both by-ref and immediate tuples.
-        if let Ref(llval, align) = tuple.val {
-            let tuple_ptr = LvalueRef::new_sized(llval, tuple.layout, align);
-            for i in 0..tuple.layout.fields.count() {
-                let field_ptr = tuple_ptr.project_field(bcx, i);
-                self.trans_argument(bcx, field_ptr.load(bcx), llargs, &args[i]);
+        match tuple.val {
+            Ref(llval, align) => {
+                for (n, &ty) in arg_types.iter().enumerate() {
+                    let ptr = LvalueRef::new_sized_ty(llval, tuple.ty, align);
+                    let (ptr, align) = ptr.trans_field_ptr(bcx, n);
+                    let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
+                        let (lldata, llextra) = base::load_fat_ptr(bcx, ptr, align, ty);
+                        Pair(lldata, llextra)
+                    } else {
+                        // trans_argument will load this if it needs to
+                        Ref(ptr, align)
+                    };
+                    let op = OperandRef {
+                        val,
+                        ty,
+                    };
+                    self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def);
+                }
+
             }
-        } else {
-            // If the tuple is immediate, the elements are as well.
-            for i in 0..tuple.layout.fields.count() {
-                let op = tuple.extract_field(bcx, i);
-                self.trans_argument(bcx, op, llargs, &args[i]);
+            Immediate(llval) => {
+                let l = bcx.ccx.layout_of(tuple.ty);
+                let v = if let layout::Univariant { ref variant, .. } = *l {
+                    variant
+                } else {
+                    bug!("Not a tuple.");
+                };
+                for (n, &ty) in arg_types.iter().enumerate() {
+                    let mut elem = bcx.extract_value(
+                        llval, adt::struct_llfields_index(v, n));
+                    // Truncate bools to i1, if needed
+                    if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
+                        elem = bcx.trunc(elem, Type::i1(bcx.ccx));
+                    }
+                    // If the tuple is immediate, the elements are as well
+                    let op = OperandRef {
+                        val: Immediate(elem),
+                        ty,
+                    };
+                    self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def);
+                }
+            }
+            Pair(a, b) => {
+                let elems = [a, b];
+                for (n, &ty) in arg_types.iter().enumerate() {
+                    let mut elem = elems[n];
+                    // Truncate bools to i1, if needed
+                    if ty.is_bool() && common::val_ty(elem) != Type::i1(bcx.ccx) {
+                        elem = bcx.trunc(elem, Type::i1(bcx.ccx));
+                    }
+                    // Pair is always made up of immediates
+                    let op = OperandRef {
+                        val: Immediate(elem),
+                        ty,
+                    };
+                    self.trans_argument(bcx, op, llargs, fn_ty, next_idx, llfn, def);
+                }
             }
         }
+
     }
 
-    fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> LvalueRef<'tcx> {
+    fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
         let ccx = bcx.ccx;
-        if let Some(slot) = self.personality_slot {
+        if let Some(slot) = self.llpersonalityslot {
             slot
         } else {
-            let layout = ccx.layout_of(ccx.tcx().intern_tup(&[
-                ccx.tcx().mk_mut_ptr(ccx.tcx().types.u8),
-                ccx.tcx().types.i32
-            ], false));
-            let slot = LvalueRef::alloca(bcx, layout, "personalityslot");
-            self.personality_slot = Some(slot);
+            let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
+            let slot = bcx.alloca(llretty, "personalityslot", None);
+            self.llpersonalityslot = Some(slot);
             slot
         }
     }
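
[editor's note] `trans_arguments_untupled` implements the "rust-call" convention: the final tuple argument is flattened so each element becomes its own call argument. A minimal model of that flattening (plain values instead of ValueRefs):

    fn main() {
        let mut llargs: Vec<String> = vec!["receiver".into()];
        let tup = (1u32, true);
        // Mirrors the loop over arg_types: one pushed argument per element.
        llargs.push(tup.0.to_string());
        llargs.push(tup.1.to_string());
        assert_eq!(llargs, ["receiver", "1", "true"]);
    }
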
@@ -738,24 +792,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
         let bcx = self.new_block("cleanup");
 
+        let ccx = bcx.ccx;
         let llpersonality = self.ccx.eh_personality();
-        let llretty = self.landing_pad_type();
-        let lp = bcx.landing_pad(llretty, llpersonality, 1, self.llfn);
-        bcx.set_cleanup(lp);
-
+        let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false);
+        let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn);
+        bcx.set_cleanup(llretval);
         let slot = self.get_personality_slot(&bcx);
-        slot.storage_live(&bcx);
-        Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)).store(&bcx, slot);
-
+        Lifetime::Start.call(&bcx, slot);
+        bcx.store(llretval, slot, None);
         bcx.br(target_bb);
         bcx.llbb()
     }
 
-    fn landing_pad_type(&self) -> Type {
-        let ccx = self.ccx;
-        Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false)
-    }
-
     fn unreachable_block(&mut self) -> BasicBlockRef {
         self.unreachable_block.unwrap_or_else(|| {
             let bl = self.new_block("unreachable");
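
[editor's note] The landing pad reverts to storing the whole `{ i8*, i32 }` value into a single personality slot. A hypothetical Rust-level model of that pair (field names are illustrative, not LLVM's):

    #[repr(C)]
    struct LandingPadValue {
        exception_ptr: *mut u8, // slot 0: the exception object, extracted on resume
        selector: i32,          // slot 1: the personality function's type selector
    }

    fn main() {
        // On the custom_unwind_resume path only slot 0 is passed on.
        let lp = LandingPadValue { exception_ptr: std::ptr::null_mut(), selector: 0 };
        let _ = (lp.exception_ptr, lp.selector);
    }
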
@@ -776,33 +824,31 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     }
 
     fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>,
-                        dest: &mir::Lvalue<'tcx>, fn_ret: &ArgType<'tcx>,
-                        llargs: &mut Vec<ValueRef>, is_intrinsic: bool)
-                        -> ReturnDest<'tcx> {
+                        dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType,
+                        llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest {
         // If the return is ignored, we can just return a do-nothing ReturnDest
-        if fn_ret.is_ignore() {
+        if fn_ret_ty.is_ignore() {
             return ReturnDest::Nothing;
         }
         let dest = if let mir::Lvalue::Local(index) = *dest {
+            let ret_ty = self.monomorphized_lvalue_ty(dest);
             match self.locals[index] {
                 LocalRef::Lvalue(dest) => dest,
                 LocalRef::Operand(None) => {
                     // Handle temporary lvalues, specifically Operand ones, as
                     // they don't have allocas
-                    return if fn_ret.is_indirect() {
+                    return if fn_ret_ty.is_indirect() {
                         // Odd, but possible, case, we have an operand temporary,
                         // but the calling convention has an indirect return.
-                        let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bcx);
+                        let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
                         llargs.push(tmp.llval);
-                        ReturnDest::IndirectOperand(tmp, index)
+                        ReturnDest::IndirectOperand(tmp.llval, index)
                     } else if is_intrinsic {
                         // Currently, intrinsics always need a location to store
                         // the result. so we create a temporary alloca for the
                         // result
-                        let tmp = LvalueRef::alloca(bcx, fn_ret.layout, "tmp_ret");
-                        tmp.storage_live(bcx);
-                        ReturnDest::IndirectOperand(tmp, index)
+                        let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
+                        ReturnDest::IndirectOperand(tmp.llval, index)
                     } else {
                         ReturnDest::DirectOperand(index)
                     };
@@ -814,13 +860,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         } else {
             self.trans_lvalue(bcx, dest)
         };
-        if fn_ret.is_indirect() {
+        if fn_ret_ty.is_indirect() {
             match dest.alignment {
                 Alignment::AbiAligned => {
                     llargs.push(dest.llval);
                     ReturnDest::Nothing
                 },
-                Alignment::Packed(_) => {
+                Alignment::Packed => {
                     // Currently, MIR code generation does not create calls
                     // that store directly to fields of packed structs (in
                     // fact, the calls it creates write only to temps),
@@ -831,7 +877,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
             }
         } else {
-            ReturnDest::Store(dest)
+            ReturnDest::Store(dest.llval)
         }
     }
 
@@ -840,67 +886,63 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                        dst: &mir::Lvalue<'tcx>) {
         if let mir::Lvalue::Local(index) = *dst {
             match self.locals[index] {
-                LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, lvalue),
+                LocalRef::Lvalue(lvalue) => self.trans_transmute_into(bcx, src, &lvalue),
                 LocalRef::Operand(None) => {
-                    let dst_layout = bcx.ccx.layout_of(self.monomorphized_lvalue_ty(dst));
-                    assert!(!dst_layout.ty.has_erasable_regions());
-                    let lvalue = LvalueRef::alloca(bcx, dst_layout, "transmute_temp");
-                    lvalue.storage_live(bcx);
-                    self.trans_transmute_into(bcx, src, lvalue);
-                    let op = lvalue.load(bcx);
-                    lvalue.storage_dead(bcx);
+                    let lvalue_ty = self.monomorphized_lvalue_ty(dst);
+                    assert!(!lvalue_ty.has_erasable_regions());
+                    let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp");
+                    self.trans_transmute_into(bcx, src, &lvalue);
+                    let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty);
                     self.locals[index] = LocalRef::Operand(Some(op));
                 }
-                LocalRef::Operand(Some(op)) => {
-                    assert!(op.layout.is_zst(),
+                LocalRef::Operand(Some(_)) => {
+                    let ty = self.monomorphized_lvalue_ty(dst);
+                    assert!(common::type_is_zero_size(bcx.ccx, ty),
                             "assigning to initialized SSAtemp");
                 }
             }
         } else {
             let dst = self.trans_lvalue(bcx, dst);
-            self.trans_transmute_into(bcx, src, dst);
+            self.trans_transmute_into(bcx, src, &dst);
         }
     }
 
     fn trans_transmute_into(&mut self, bcx: &Builder<'a, 'tcx>,
                             src: &mir::Operand<'tcx>,
-                            dst: LvalueRef<'tcx>) {
-        let src = self.trans_operand(bcx, src);
-        let llty = src.layout.llvm_type(bcx.ccx);
+                            dst: &LvalueRef<'tcx>) {
+        let val = self.trans_operand(bcx, src);
+        let llty = type_of::type_of(bcx.ccx, val.ty);
         let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
-        let align = src.layout.align.min(dst.layout.align);
-        src.val.store(bcx,
-            LvalueRef::new_sized(cast_ptr, src.layout, Alignment::Packed(align)));
+        let in_type = val.ty;
+        let out_type = dst.ty.to_ty(bcx.tcx());
+        let llalign = cmp::min(bcx.ccx.align_of(in_type), bcx.ccx.align_of(out_type));
+        self.store_operand(bcx, cast_ptr, Some(llalign), val);
     }
 
 
     // Stores the return value of a function call into its final location.
     fn store_return(&mut self,
                     bcx: &Builder<'a, 'tcx>,
-                    dest: ReturnDest<'tcx>,
+                    dest: ReturnDest,
                     ret_ty: &ArgType<'tcx>,
-                    llval: ValueRef) {
+                    op: OperandRef<'tcx>) {
         use self::ReturnDest::*;
 
         match dest {
             Nothing => (),
-            Store(dst) => ret_ty.store(bcx, llval, dst),
+            Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
             IndirectOperand(tmp, index) => {
-                let op = tmp.load(bcx);
-                tmp.storage_dead(bcx);
+                let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty);
                 self.locals[index] = LocalRef::Operand(Some(op));
             }
             DirectOperand(index) => {
                 // If there is a cast, we have to store and reload.
-                let op = if let PassMode::Cast(_) = ret_ty.mode {
-                    let tmp = LvalueRef::alloca(bcx, ret_ty.layout, "tmp_ret");
-                    tmp.storage_live(bcx);
-                    ret_ty.store(bcx, llval, tmp);
-                    let op = tmp.load(bcx);
-                    tmp.storage_dead(bcx);
-                    op
+                let op = if ret_ty.cast.is_some() {
+                    let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret");
+                    ret_ty.store(bcx, op.immediate(), tmp.llval);
+                    self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty)
                 } else {
-                    OperandRef::from_immediate_or_packed_pair(bcx, llval, ret_ty.layout)
+                    op.unpack_if_pair(bcx)
                 };
                 self.locals[index] = LocalRef::Operand(Some(op));
             }
@@ -908,13 +950,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     }
 }
 
-enum ReturnDest<'tcx> {
+enum ReturnDest {
     // Do nothing, the return value is indirect or ignored
     Nothing,
     // Store the return value to the pointer
-    Store(LvalueRef<'tcx>),
+    Store(ValueRef),
     // Stores an indirect return value to an operand local lvalue
-    IndirectOperand(LvalueRef<'tcx>, mir::Local),
+    IndirectOperand(ValueRef, mir::Local),
     // Stores a direct return value to an operand local lvalue
     DirectOperand(mir::Local)
 }
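
[editor's note] How `store_return` dispatches on these variants, as a standalone sketch (raw pointers stand in for `ValueRef`; the strings summarize the actions taken above):

    #[allow(dead_code)]
    enum ReturnDest {
        Nothing,
        Store(*mut u8),
        IndirectOperand(*mut u8, usize),
        DirectOperand(usize),
    }

    fn describe(dest: &ReturnDest) -> &'static str {
        match dest {
            ReturnDest::Nothing => "nothing to do: indirect or ignored",
            ReturnDest::Store(_) => "store the immediate through the pointer",
            ReturnDest::IndirectOperand(..) => "reload the temp into an operand local",
            ReturnDest::DirectOperand(_) => "unpack (or cast, store, reload) into an operand local",
        }
    }

    fn main() {
        assert!(describe(&ReturnDest::Nothing).starts_with("nothing"));
    }
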
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index 8c013330e5b..6573e507bd3 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -18,21 +18,21 @@ use rustc::traits;
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
-use rustc::ty::layout::{self, LayoutOf, Size};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::ty::cast::{CastTy, IntTy};
 use rustc::ty::subst::{Kind, Substs, Subst};
 use rustc_apfloat::{ieee, Float, Status};
 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
-use base;
+use {adt, base, machine};
 use abi::{self, Abi};
 use callee;
 use builder::Builder;
 use common::{self, CrateContext, const_get_elt, val_ty};
-use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_uint_big, C_u32, C_u64};
-use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
+use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64};
+use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, is_undef};
 use common::const_to_opt_u128;
 use consts;
-use type_of::LayoutLlvmExt;
+use type_of;
 use type_::Type;
 use value::Value;
 
@@ -55,7 +55,7 @@ pub struct Const<'tcx> {
     pub ty: Ty<'tcx>
 }
 
-impl<'a, 'tcx> Const<'tcx> {
+impl<'tcx> Const<'tcx> {
     pub fn new(llval: ValueRef, ty: Ty<'tcx>) -> Const<'tcx> {
         Const {
             llval,
@@ -63,31 +63,32 @@ impl<'a, 'tcx> Const<'tcx> {
         }
     }
 
-    pub fn from_constint(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt) -> Const<'tcx> {
+    pub fn from_constint<'a>(ccx: &CrateContext<'a, 'tcx>, ci: &ConstInt)
+    -> Const<'tcx> {
         let tcx = ccx.tcx();
         let (llval, ty) = match *ci {
             I8(v) => (C_int(Type::i8(ccx), v as i64), tcx.types.i8),
             I16(v) => (C_int(Type::i16(ccx), v as i64), tcx.types.i16),
             I32(v) => (C_int(Type::i32(ccx), v as i64), tcx.types.i32),
             I64(v) => (C_int(Type::i64(ccx), v as i64), tcx.types.i64),
-            I128(v) => (C_uint_big(Type::i128(ccx), v as u128), tcx.types.i128),
+            I128(v) => (C_big_integral(Type::i128(ccx), v as u128), tcx.types.i128),
             Isize(v) => (C_int(Type::isize(ccx), v.as_i64()), tcx.types.isize),
             U8(v) => (C_uint(Type::i8(ccx), v as u64), tcx.types.u8),
             U16(v) => (C_uint(Type::i16(ccx), v as u64), tcx.types.u16),
             U32(v) => (C_uint(Type::i32(ccx), v as u64), tcx.types.u32),
             U64(v) => (C_uint(Type::i64(ccx), v), tcx.types.u64),
-            U128(v) => (C_uint_big(Type::i128(ccx), v), tcx.types.u128),
+            U128(v) => (C_big_integral(Type::i128(ccx), v), tcx.types.u128),
             Usize(v) => (C_uint(Type::isize(ccx), v.as_u64()), tcx.types.usize),
         };
         Const { llval: llval, ty: ty }
     }
 
     /// Translate ConstVal into a LLVM constant value.
-    pub fn from_constval(ccx: &CrateContext<'a, 'tcx>,
-                         cv: &ConstVal,
-                         ty: Ty<'tcx>)
-                         -> Const<'tcx> {
-        let llty = ccx.layout_of(ty).llvm_type(ccx);
+    pub fn from_constval<'a>(ccx: &CrateContext<'a, 'tcx>,
+                             cv: &ConstVal,
+                             ty: Ty<'tcx>)
+                             -> Const<'tcx> {
+        let llty = type_of::type_of(ccx, ty);
         let val = match *cv {
             ConstVal::Float(v) => {
                 let bits = match v.ty {
@@ -99,11 +100,9 @@ impl<'a, 'tcx> Const<'tcx> {
             ConstVal::Bool(v) => C_bool(ccx, v),
             ConstVal::Integral(ref i) => return Const::from_constint(ccx, i),
             ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
-            ConstVal::ByteStr(v) => {
-                consts::addr_of(ccx, C_bytes(ccx, v.data), ccx.align_of(ty), "byte_str")
-            }
+            ConstVal::ByteStr(v) => consts::addr_of(ccx, C_bytes(ccx, v.data), 1, "byte_str"),
             ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64),
-            ConstVal::Function(..) => C_undef(llty),
+            ConstVal::Function(..) => C_null(type_of::type_of(ccx, ty)),
             ConstVal::Variant(_) |
             ConstVal::Aggregate(..) |
             ConstVal::Unevaluated(..) => {
@@ -116,44 +115,15 @@ impl<'a, 'tcx> Const<'tcx> {
         Const::new(val, ty)
     }
 
-    fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef {
-        let layout = ccx.layout_of(self.ty);
-        let field = layout.field(ccx, i);
-        if field.is_zst() {
-            return C_undef(field.immediate_llvm_type(ccx));
-        }
-        match layout.abi {
-            layout::Abi::Scalar(_) => self.llval,
-            layout::Abi::ScalarPair(ref a, ref b) => {
-                let offset = layout.fields.offset(i);
-                if offset.bytes() == 0 {
-                    if field.size == layout.size {
-                        self.llval
-                    } else {
-                        assert_eq!(field.size, a.value.size(ccx));
-                        const_get_elt(self.llval, 0)
-                    }
-                } else {
-                    assert_eq!(offset, a.value.size(ccx)
-                        .abi_align(b.value.align(ccx)));
-                    assert_eq!(field.size, b.value.size(ccx));
-                    const_get_elt(self.llval, 1)
-                }
-            }
-            _ => {
-                const_get_elt(self.llval, layout.llvm_field_index(i))
-            }
-        }
-    }
-
-    fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) {
-        (self.get_field(ccx, 0), self.get_field(ccx, 1))
+    fn get_pair(&self) -> (ValueRef, ValueRef) {
+        (const_get_elt(self.llval, &[0]),
+         const_get_elt(self.llval, &[1]))
     }
 
-    fn get_fat_ptr(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) {
+    fn get_fat_ptr(&self) -> (ValueRef, ValueRef) {
         assert_eq!(abi::FAT_PTR_ADDR, 0);
         assert_eq!(abi::FAT_PTR_EXTRA, 1);
-        self.get_pair(ccx)
+        self.get_pair()
     }
 
     fn as_lvalue(&self) -> ConstLvalue<'tcx> {
@@ -164,16 +134,14 @@ impl<'a, 'tcx> Const<'tcx> {
         }
     }
 
-    pub fn to_operand(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
-        let layout = ccx.layout_of(self.ty);
-        let llty = layout.immediate_llvm_type(ccx);
+    pub fn to_operand<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> OperandRef<'tcx> {
+        let llty = type_of::immediate_type_of(ccx, self.ty);
         let llvalty = val_ty(self.llval);
 
-        let val = if llty == llvalty && layout.is_llvm_scalar_pair() {
-            OperandValue::Pair(
-                const_get_elt(self.llval, 0),
-                const_get_elt(self.llval, 1))
-        } else if llty == llvalty && layout.is_llvm_immediate() {
+        let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) {
+            let (a, b) = self.get_pair();
+            OperandValue::Pair(a, b)
+        } else if llty == llvalty && common::type_is_immediate(ccx, self.ty) {
             // If the types match, we can use the value directly.
             OperandValue::Immediate(self.llval)
         } else {
@@ -181,13 +149,12 @@ impl<'a, 'tcx> Const<'tcx> {
             // a constant LLVM global and cast its address if necessary.
             let align = ccx.align_of(self.ty);
             let ptr = consts::addr_of(ccx, self.llval, align, "const");
-            OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(ccx).ptr_to()),
-                              Alignment::AbiAligned)
+            OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned)
         };
 
         OperandRef {
             val,
-            layout: ccx.layout_of(self.ty)
+            ty: self.ty
         }
     }
 }
@@ -401,12 +368,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                             match &tcx.item_name(def_id)[..] {
                                 "size_of" => {
                                     let llval = C_usize(self.ccx,
-                                        self.ccx.size_of(substs.type_at(0)).bytes());
+                                        self.ccx.size_of(substs.type_at(0)));
                                     Ok(Const::new(llval, tcx.types.usize))
                                 }
                                 "min_align_of" => {
                                     let llval = C_usize(self.ccx,
-                                        self.ccx.align_of(substs.type_at(0)).abi());
+                                        self.ccx.align_of(substs.type_at(0)) as u64);
                                     Ok(Const::new(llval, tcx.types.usize))
                                 }
                                 _ => span_bug!(span, "{:?} in constant", terminator.kind)
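For reference, the constants these intrinsic shims produce match the language-level `size_of`/`min_align_of` semantics. A minimal standalone sketch (illustrative only, not from the patch; the concrete numbers assume a typical 64-bit target):

use std::mem;

fn main() {
    // `size_of` is the type's stride in bytes.
    assert_eq!(mem::size_of::<u32>(), 4);
    // `min_align_of` (today's `align_of`) is the ABI alignment.
    assert_eq!(mem::align_of::<u64>(), 8);
    // Padding counts toward the size: (u32, u64) is 16 bytes, not 12.
    assert_eq!(mem::size_of::<(u32, u64)>(), 16);
}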
@@ -469,7 +436,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                         let (base, extra) = if !has_metadata {
                             (base.llval, ptr::null_mut())
                         } else {
-                            base.get_fat_ptr(self.ccx)
+                            base.get_fat_ptr()
                         };
                         if self.ccx.statics().borrow().contains_key(&base) {
                             (Base::Static(base), extra)
@@ -483,10 +450,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                                 span_bug!(span, "dereference of non-constant pointer `{:?}`",
                                           Value(base));
                             }
-                            let layout = self.ccx.layout_of(projected_ty);
-                            if let layout::Abi::Scalar(ref scalar) = layout.abi {
+                            if projected_ty.is_bool() {
                                 let i1_type = Type::i1(self.ccx);
-                                if scalar.is_bool() && val_ty(val) != i1_type {
+                                if val_ty(val) != i1_type {
                                     unsafe {
                                         val = llvm::LLVMConstTrunc(val, i1_type.to_ref());
                                     }
@@ -496,7 +462,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                         }
                     }
                     mir::ProjectionElem::Field(ref field, _) => {
-                        let llprojected = base.get_field(self.ccx, field.index());
+                        let llprojected = adt::const_get_field(self.ccx, tr_base.ty, base.llval,
+                                                               field.index());
                         let llextra = if !has_metadata {
                             ptr::null_mut()
                         } else {
@@ -517,9 +484,9 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                         // Produce an undef instead of an LLVM assertion on OOB.
                         let len = common::const_to_uint(tr_base.len(self.ccx));
                         let llelem = if iv < len as u128 {
-                            const_get_elt(base.llval, iv as u64)
+                            const_get_elt(base.llval, &[iv as u32])
                         } else {
-                            C_undef(self.ccx.layout_of(projected_ty).llvm_type(self.ccx))
+                            C_undef(type_of::type_of(self.ccx, projected_ty))
                         };
 
                         (Base::Value(llelem), ptr::null_mut())
@@ -573,7 +540,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
         let elem_ty = array_ty.builtin_index().unwrap_or_else(|| {
             bug!("bad array type {:?}", array_ty)
         });
-        let llunitty = self.ccx.layout_of(elem_ty).llvm_type(self.ccx);
+        let llunitty = type_of::type_of(self.ccx, elem_ty);
         // If the array contains enums, an LLVM array won't work.
         let val = if fields.iter().all(|&f| val_ty(f) == llunitty) {
             C_array(llunitty, fields)
@@ -599,7 +566,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                 self.const_array(dest_ty, &fields)
             }
 
-            mir::Rvalue::Aggregate(box mir::AggregateKind::Array(_), ref operands) => {
+            mir::Rvalue::Aggregate(ref kind, ref operands) => {
                 // Make sure to evaluate all operands to
                 // report as many errors as we possibly can.
                 let mut fields = Vec::with_capacity(operands.len());
@@ -612,23 +579,17 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                 }
                 failure?;
 
-                self.const_array(dest_ty, &fields)
-            }
-
-            mir::Rvalue::Aggregate(ref kind, ref operands) => {
-                // Make sure to evaluate all operands to
-                // report as many errors as we possibly can.
-                let mut fields = Vec::with_capacity(operands.len());
-                let mut failure = Ok(());
-                for operand in operands {
-                    match self.const_operand(operand, span) {
-                        Ok(val) => fields.push(val),
-                        Err(err) => if failure.is_ok() { failure = Err(err); }
+                match **kind {
+                    mir::AggregateKind::Array(_) => {
+                        self.const_array(dest_ty, &fields)
+                    }
+                    mir::AggregateKind::Adt(..) |
+                    mir::AggregateKind::Closure(..) |
+                    mir::AggregateKind::Generator(..) |
+                    mir::AggregateKind::Tuple => {
+                        Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty)
                     }
                 }
-                failure?;
-
-                trans_const_adt(self.ccx, dest_ty, kind, &fields)
             }
 
             mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
@@ -674,6 +635,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                         operand.llval
                     }
                     mir::CastKind::Unsize => {
+                        // Unsize casts to anything other than a fat pointer
+                        // can't currently occur in constants.
+                        assert!(common::type_is_fat_ptr(self.ccx, cast_ty));
+
                         let pointee_ty = operand.ty.builtin_deref(true, ty::NoPreference)
                             .expect("consts: unsizing got non-pointer type").ty;
                         let (base, old_info) = if !self.ccx.shared().type_is_sized(pointee_ty) {
@@ -683,7 +648,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                             // to use a different vtable. In that case, we want to
                             // load out the original data pointer so we can repackage
                             // it.
-                            let (base, extra) = operand.get_fat_ptr(self.ccx);
+                            let (base, extra) = operand.get_fat_ptr();
                             (base, Some(extra))
                         } else {
                             (operand.llval, None)
@@ -691,7 +656,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
 
                         let unsized_ty = cast_ty.builtin_deref(true, ty::NoPreference)
                             .expect("consts: unsizing got non-pointer target type").ty;
-                        let ptr_ty = self.ccx.layout_of(unsized_ty).llvm_type(self.ccx).ptr_to();
+                        let ptr_ty = type_of::in_memory_type_of(self.ccx, unsized_ty).ptr_to();
                         let base = consts::ptrcast(base, ptr_ty);
                         let info = base::unsized_info(self.ccx, pointee_ty,
                                                       unsized_ty, old_info);
@@ -701,23 +666,22 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                                                      .insert(base, operand.llval);
                             assert!(prev_const.is_none() || prev_const == Some(operand.llval));
                         }
-                        C_fat_ptr(self.ccx, base, info)
+                        assert_eq!(abi::FAT_PTR_ADDR, 0);
+                        assert_eq!(abi::FAT_PTR_EXTRA, 1);
+                        C_struct(self.ccx, &[base, info], false)
                     }
-                    mir::CastKind::Misc if self.ccx.layout_of(operand.ty).is_llvm_immediate() => {
+                    mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => {
+                        debug_assert!(common::type_is_immediate(self.ccx, cast_ty));
                         let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                         let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
-                        let cast_layout = self.ccx.layout_of(cast_ty);
-                        assert!(cast_layout.is_llvm_immediate());
-                        let ll_t_out = cast_layout.immediate_llvm_type(self.ccx);
+                        let ll_t_out = type_of::immediate_type_of(self.ccx, cast_ty);
                         let llval = operand.llval;
-
-                        let mut signed = false;
-                        let l = self.ccx.layout_of(operand.ty);
-                        if let layout::Abi::Scalar(ref scalar) = l.abi {
-                            if let layout::Int(_, true) = scalar.value {
-                                signed = true;
-                            }
-                        }
+                        let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
+                            let l = self.ccx.layout_of(operand.ty);
+                            adt::is_discr_signed(&l)
+                        } else {
+                            operand.ty.is_signed()
+                        };
 
                         unsafe {
                             match (r_t_in, r_t_out) {
@@ -756,19 +720,20 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                         }
                     }
                     mir::CastKind::Misc => { // Casts from a fat-ptr.
-                        let l = self.ccx.layout_of(operand.ty);
-                        let cast = self.ccx.layout_of(cast_ty);
-                        if l.is_llvm_scalar_pair() {
-                            let (data_ptr, meta) = operand.get_fat_ptr(self.ccx);
-                            if cast.is_llvm_scalar_pair() {
-                                let data_cast = consts::ptrcast(data_ptr,
-                                    cast.scalar_pair_element_llvm_type(self.ccx, 0));
-                                C_fat_ptr(self.ccx, data_cast, meta)
+                        let ll_cast_ty = type_of::immediate_type_of(self.ccx, cast_ty);
+                        let ll_from_ty = type_of::immediate_type_of(self.ccx, operand.ty);
+                        if common::type_is_fat_ptr(self.ccx, operand.ty) {
+                            let (data_ptr, meta_ptr) = operand.get_fat_ptr();
+                            if common::type_is_fat_ptr(self.ccx, cast_ty) {
+                                let ll_cft = ll_cast_ty.field_types();
+                                let ll_fft = ll_from_ty.field_types();
+                                let data_cast = consts::ptrcast(data_ptr, ll_cft[0]);
+                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
+                                C_struct(self.ccx, &[data_cast, meta_ptr], false)
                             } else { // cast to thin-ptr
                                 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                 // pointer-cast of that pointer to desired pointer type.
-                                let llcast_ty = cast.immediate_llvm_type(self.ccx);
-                                consts::ptrcast(data_ptr, llcast_ty)
+                                consts::ptrcast(data_ptr, ll_cast_ty)
                             }
                         } else {
                             bug!("Unexpected non-fat-pointer operand")
@@ -791,7 +756,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                         let align = if self.ccx.shared().type_is_sized(ty) {
                             self.ccx.align_of(ty)
                         } else {
-                            self.ccx.tcx().data_layout.pointer_align
+                            self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign
                         };
                         if bk == mir::BorrowKind::Mut {
                             consts::addr_of_mut(self.ccx, llval, align, "ref_mut")
@@ -806,7 +771,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                 let ptr = if self.ccx.shared().type_is_sized(ty) {
                     base
                 } else {
-                    C_fat_ptr(self.ccx, base, tr_lvalue.llextra)
+                    C_struct(self.ccx, &[base, tr_lvalue.llextra], false)
                 };
                 Const::new(ptr, ref_ty)
             }
@@ -836,10 +801,8 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
 
                 match const_scalar_checked_binop(tcx, op, lhs, rhs, ty) {
                     Some((llval, of)) => {
-                        trans_const_adt(self.ccx, binop_ty, &mir::AggregateKind::Tuple, &[
-                            Const::new(llval, val_ty),
-                            Const::new(C_bool(self.ccx, of), tcx.types.bool)
-                        ])
+                        let llof = C_bool(self.ccx, of);
+                        Const::new(C_struct(self.ccx, &[llval, llof], false), binop_ty)
                     }
                     None => {
                         span_bug!(span, "{:?} got non-integer operands: {:?} and {:?}",
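The `Some((llval, of))` arm above materializes a checked binop constant as a (result, overflow-flag) pair, mirroring the language-level `overflowing_*` operations. A small sketch of that semantics (illustrative, not compiler code):

fn main() {
    let (val, of) = 250u8.overflowing_add(10);
    assert_eq!((val, of), (4, true));    // wrapped past 255, flag set
    let (val, of) = 250u8.overflowing_add(5);
    assert_eq!((val, of), (255, false)); // fits, no overflow
}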
@@ -873,7 +836,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                 assert!(self.ccx.shared().type_is_sized(ty));
-                let llval = C_usize(self.ccx, self.ccx.size_of(ty).bytes());
+                let llval = C_usize(self.ccx, self.ccx.size_of(ty));
                 Const::new(llval, tcx.types.usize)
             }
 
@@ -1023,7 +986,7 @@ unsafe fn cast_const_float_to_int(ccx: &CrateContext,
         let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast };
         err.report(ccx.tcx(), span, "expression");
     }
-    C_uint_big(int_ty, cast_result.value)
+    C_big_integral(int_ty, cast_result.value)
 }
 
 unsafe fn cast_const_int_to_float(ccx: &CrateContext,
@@ -1074,7 +1037,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
         let result = result.unwrap_or_else(|_| {
             // We've errored, so we don't have to produce working code.
-            let llty = bcx.ccx.layout_of(ty).llvm_type(bcx.ccx);
+            let llty = type_of::type_of(bcx.ccx, ty);
             Const::new(C_undef(llty), ty)
         });
 
@@ -1112,41 +1075,19 @@ pub fn trans_static_initializer<'a, 'tcx>(
 /// Currently the returned value has the same size as the type, but
 /// this could be changed in the future to avoid allocating unnecessary
 /// space after values of shorter-than-maximum cases.
-fn trans_const_adt<'a, 'tcx>(
+fn trans_const<'a, 'tcx>(
     ccx: &CrateContext<'a, 'tcx>,
     t: Ty<'tcx>,
     kind: &mir::AggregateKind,
-    vals: &[Const<'tcx>]
-) -> Const<'tcx> {
+    vals: &[ValueRef]
+) -> ValueRef {
     let l = ccx.layout_of(t);
     let variant_index = match *kind {
         mir::AggregateKind::Adt(_, index, _, _) => index,
         _ => 0,
     };
-
-    if let layout::Abi::Uninhabited = l.abi {
-        return Const::new(C_undef(l.llvm_type(ccx)), t);
-    }
-
-    match l.variants {
-        layout::Variants::Single { index } => {
-            assert_eq!(variant_index, index);
-            if let layout::Abi::Vector = l.abi {
-                Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::<Vec<_>>()), t)
-            } else if let layout::FieldPlacement::Union(_) = l.fields {
-                assert_eq!(variant_index, 0);
-                assert_eq!(vals.len(), 1);
-                let contents = [
-                    vals[0].llval,
-                    padding(ccx, l.size - ccx.size_of(vals[0].ty))
-                ];
-
-                Const::new(C_struct(ccx, &contents, l.is_packed()), t)
-            } else {
-                build_const_struct(ccx, l, vals, None)
-            }
-        }
-        layout::Variants::Tagged { .. } => {
+    match *l {
+        layout::CEnum { discr: d, min, max, .. } => {
             let discr = match *kind {
                 mir::AggregateKind::Adt(adt_def, _, _, _) => {
                     adt_def.discriminant_for_variant(ccx.tcx(), variant_index)
@@ -1154,103 +1095,114 @@ fn trans_const_adt<'a, 'tcx>(
                 },
                 _ => 0,
             };
-            let discr_field = l.field(ccx, 0);
-            let discr = C_int(discr_field.llvm_type(ccx), discr as i64);
-            if let layout::Abi::Scalar(_) = l.abi {
-                Const::new(discr, t)
+            assert_eq!(vals.len(), 0);
+            adt::assert_discr_in_range(min, max, discr);
+            C_int(Type::from_integer(ccx, d), discr as i64)
+        }
+        layout::General { discr: d, ref variants, .. } => {
+            let variant = &variants[variant_index];
+            let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64);
+            let mut vals_with_discr = vec![lldiscr];
+            vals_with_discr.extend_from_slice(vals);
+            let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]);
+            let needed_padding = l.size(ccx).bytes() - variant.stride().bytes();
+            if needed_padding > 0 {
+                contents.push(padding(ccx, needed_padding));
+            }
+            C_struct(ccx, &contents[..], false)
+        }
+        layout::UntaggedUnion { ref variants, .. } => {
+            assert_eq!(variant_index, 0);
+            let contents = build_const_union(ccx, variants, vals[0]);
+            C_struct(ccx, &contents, variants.packed)
+        }
+        layout::Univariant { ref variant, .. } => {
+            assert_eq!(variant_index, 0);
+            let contents = build_const_struct(ccx, &variant, vals);
+            C_struct(ccx, &contents[..], variant.packed)
+        }
+        layout::Vector { .. } => {
+            C_vector(vals)
+        }
+        layout::RawNullablePointer { nndiscr, .. } => {
+            if variant_index as u64 == nndiscr {
+                assert_eq!(vals.len(), 1);
+                vals[0]
             } else {
-                let discr = Const::new(discr, discr_field.ty);
-                build_const_struct(ccx, l.for_variant(ccx, variant_index), vals, Some(discr))
+                C_null(type_of::type_of(ccx, t))
             }
         }
-        layout::Variants::NicheFilling {
-            dataful_variant,
-            ref niche_variants,
-            niche_start,
-            ..
-        } => {
-            if variant_index == dataful_variant {
-                build_const_struct(ccx, l.for_variant(ccx, dataful_variant), vals, None)
+        layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
+            if variant_index as u64 == nndiscr {
+                C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false)
             } else {
-                let niche = l.field(ccx, 0);
-                let niche_llty = niche.llvm_type(ccx);
-                let niche_value = ((variant_index - niche_variants.start) as u128)
-                    .wrapping_add(niche_start);
-                // FIXME(eddyb) Check the actual primitive type here.
-                let niche_llval = if niche_value == 0 {
-                    // HACK(eddyb) Using `C_null` as it works on all types.
-                    C_null(niche_llty)
-                } else {
-                    C_uint_big(niche_llty, niche_value)
-                };
-                build_const_struct(ccx, l, &[Const::new(niche_llval, niche.ty)], None)
+                // Always use null even if it's not the `discrfield`th
+                // field; see #8506.
+                C_null(type_of::type_of(ccx, t))
             }
         }
+        _ => bug!("trans_const: cannot handle type {} represented as {:#?}", t, l)
     }
 }
 
 /// Building structs is a little complicated, because we might need to
 /// insert padding if a field's value is less aligned than its type.
 ///
-/// Continuing the example from `trans_const_adt`, a value of type `(u32,
+/// Continuing the example from `trans_const`, a value of type `(u32,
 /// E)` should have the `E` at offset 8, but if that field's
 /// initializer is 4-byte aligned then simply translating the tuple as
 /// a two-element struct will locate it at offset 4, and accesses to it
 /// will read the wrong memory.
 fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                layout: layout::TyLayout<'tcx>,
-                                vals: &[Const<'tcx>],
-                                discr: Option<Const<'tcx>>)
-                                -> Const<'tcx> {
-    assert_eq!(vals.len(), layout.fields.count());
-
-    match layout.abi {
-        layout::Abi::Scalar(_) |
-        layout::Abi::ScalarPair(..) if discr.is_none() => {
-            let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| {
-                (f, layout.fields.offset(i))
-            }).filter(|&(f, _)| !ccx.layout_of(f.ty).is_zst());
-            match (non_zst_fields.next(), non_zst_fields.next()) {
-                (Some((x, offset)), None) if offset.bytes() == 0 => {
-                    return Const::new(x.llval, layout.ty);
-                }
-                (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => {
-                    return Const::new(C_struct(ccx, &[a.llval, b.llval], false), layout.ty);
-                }
-                (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => {
-                    return Const::new(C_struct(ccx, &[b.llval, a.llval], false), layout.ty);
-                }
-                _ => {}
-            }
-        }
-        _ => {}
+                                st: &layout::Struct,
+                                vals: &[ValueRef])
+                                -> Vec<ValueRef> {
+    assert_eq!(vals.len(), st.offsets.len());
+
+    if vals.len() == 0 {
+        return Vec::new();
     }
 
     // offset of current value
-    let mut offset = Size::from_bytes(0);
+    let mut offset = 0;
     let mut cfields = Vec::new();
-    cfields.reserve(discr.is_some() as usize + 1 + layout.fields.count() * 2);
+    cfields.reserve(st.offsets.len() * 2);
 
-    if let Some(discr) = discr {
-        cfields.push(discr.llval);
-        offset = ccx.size_of(discr.ty);
+    let parts = st.field_index_by_increasing_offset().map(|i| {
+        (&vals[i], st.offsets[i].bytes())
+    });
+    for (&val, target_offset) in parts {
+        if offset < target_offset {
+            cfields.push(padding(ccx, target_offset - offset));
+            offset = target_offset;
+        }
+        assert!(!is_undef(val));
+        cfields.push(val);
+        offset += machine::llsize_of_alloc(ccx, val_ty(val));
     }
 
-    let parts = layout.fields.index_by_increasing_offset().map(|i| {
-        (vals[i], layout.fields.offset(i))
-    });
-    for (val, target_offset) in parts {
-        cfields.push(padding(ccx, target_offset - offset));
-        cfields.push(val.llval);
-        offset = target_offset + ccx.size_of(val.ty);
+    if offset < st.stride().bytes() {
+        cfields.push(padding(ccx, st.stride().bytes() - offset));
     }
 
-    // Pad to the size of the whole type, not e.g. the variant.
-    cfields.push(padding(ccx, ccx.size_of(layout.ty) - offset));
+    cfields
+}
+
+fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                               un: &layout::Union,
+                               field_val: ValueRef)
+                               -> Vec<ValueRef> {
+    let mut cfields = vec![field_val];
+
+    let offset = machine::llsize_of_alloc(ccx, val_ty(field_val));
+    let size = un.stride().bytes();
+    if offset != size {
+        cfields.push(padding(ccx, size - offset));
+    }
 
-    Const::new(C_struct(ccx, &cfields, layout.is_packed()), layout.ty)
+    cfields
 }
 
-fn padding(ccx: &CrateContext, size: Size) -> ValueRef {
-    C_undef(Type::array(&Type::i8(ccx), size.bytes()))
+fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
+    C_undef(Type::array(&Type::i8(ccx), size))
 }
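A standalone sketch (illustrative only) of the padding discipline in `build_const_struct` above: walk the fields in increasing offset order, emit an undef byte-array filler whenever the next field starts past the current offset, and finally pad out to the struct's stride.

fn layout_with_padding(fields: &[(u64, u64)], stride: u64) -> Vec<String> {
    // Each field is (target_offset, size); `stride` is the full struct size.
    let mut out = Vec::new();
    let mut offset = 0;
    for &(target, size) in fields {
        if offset < target {
            out.push(format!("[{} x i8] padding", target - offset));
            offset = target;
        }
        out.push(format!("field at offset {}", offset));
        offset += size;
    }
    if offset < stride {
        out.push(format!("[{} x i8] padding", stride - offset));
    }
    out
}

fn main() {
    // The `(u32, E)` example from the comment above: a 4-byte u32 at
    // offset 0, an 8-byte E at offset 8, stride 16 -> 4 bytes of
    // padding between the fields.
    for part in layout_with_padding(&[(0, 4), (8, 8)], 16) {
        println!("{}", part);
    }
}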
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index 891d52045c2..d939acaccd9 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -8,17 +8,18 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-use llvm::{self, ValueRef};
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
+use llvm::ValueRef;
+use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;
-use base;
+use adt;
 use builder::Builder;
-use common::{CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, C_uint_big};
+use common::{self, CrateContext, C_usize};
 use consts;
-use type_of::LayoutLlvmExt;
+use machine;
+use type_of;
 use type_::Type;
 use value::Value;
 use glue;
@@ -27,11 +28,10 @@ use std::ptr;
 use std::ops;
 
 use super::{MirContext, LocalRef};
-use super::operand::{OperandRef, OperandValue};
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum Alignment {
-    Packed(Align),
+    Packed,
     AbiAligned,
 }
 
@@ -40,36 +40,34 @@ impl ops::BitOr for Alignment {
 
     fn bitor(self, rhs: Self) -> Self {
         match (self, rhs) {
-            (Alignment::Packed(a), Alignment::Packed(b)) => {
-                Alignment::Packed(a.min(b))
-            }
-            (Alignment::Packed(x), _) | (_, Alignment::Packed(x)) => {
-                Alignment::Packed(x)
-            }
-            (Alignment::AbiAligned, Alignment::AbiAligned) => {
-                Alignment::AbiAligned
-            }
+            (Alignment::Packed, _) => Alignment::Packed,
+            (Alignment::AbiAligned, a) => a,
         }
     }
 }
 
-impl<'a> From<TyLayout<'a>> for Alignment {
-    fn from(layout: TyLayout) -> Self {
-        if layout.is_packed() {
-            Alignment::Packed(layout.align)
+impl Alignment {
+    pub fn from_packed(packed: bool) -> Self {
+        if packed {
+            Alignment::Packed
         } else {
             Alignment::AbiAligned
         }
     }
-}
 
-impl Alignment {
-    pub fn non_abi(self) -> Option<Align> {
+    pub fn to_align(self) -> Option<u32> {
         match self {
-            Alignment::Packed(x) => Some(x),
+            Alignment::Packed => Some(1),
             Alignment::AbiAligned => None,
         }
     }
+
+    pub fn min_with(self, align: u32) -> Option<u32> {
+        match self {
+            Alignment::Packed => Some(1),
+            Alignment::AbiAligned => Some(align),
+        }
+    }
 }
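The `Packed` variant collapsing to an alignment of 1 reflects how `#[repr(packed)]` works at the language level: fields can land below their type's ABI alignment, so nothing better than byte alignment may be assumed. A standalone illustration (not compiler code):

#[repr(C, packed)]
struct P {
    a: u8,
    b: u32, // sits at offset 1, below u32's usual 4-byte alignment
}

fn main() {
    // Packing removes padding: 1 + 4 = 5 bytes, and the whole struct
    // only guarantees byte alignment.
    assert_eq!(std::mem::size_of::<P>(), 5);
    assert_eq!(std::mem::align_of::<P>(), 1);
}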
 
 #[derive(Copy, Clone, Debug)]
@@ -81,43 +79,41 @@ pub struct LvalueRef<'tcx> {
     pub llextra: ValueRef,
 
     /// Monomorphized type of this lvalue, including variant information
-    pub layout: TyLayout<'tcx>,
+    pub ty: LvalueTy<'tcx>,
 
     /// Whether this lvalue is known to be aligned according to its layout
     pub alignment: Alignment,
 }
 
 impl<'a, 'tcx> LvalueRef<'tcx> {
-    pub fn new_sized(llval: ValueRef,
-                     layout: TyLayout<'tcx>,
-                     alignment: Alignment)
-                     -> LvalueRef<'tcx> {
-        LvalueRef {
-            llval,
-            llextra: ptr::null_mut(),
-            layout,
-            alignment
-        }
+    pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>,
+                     alignment: Alignment) -> LvalueRef<'tcx> {
+        LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment }
     }
 
-    pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str)
-                  -> LvalueRef<'tcx> {
-        debug!("alloca({:?}: {:?})", name, layout);
-        let tmp = bcx.alloca(layout.llvm_type(bcx.ccx), name, layout.align);
-        Self::new_sized(tmp, layout, Alignment::AbiAligned)
+    pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> {
+        LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment)
+    }
+
+    pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
+        debug!("alloca({:?}: {:?})", name, ty);
+        let tmp = bcx.alloca(
+            type_of::type_of(bcx.ccx, ty), name, bcx.ccx.over_align_of(ty));
+        assert!(!ty.has_param_types());
+        Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
     }
 
     pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
-        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
-            if self.layout.is_unsized() {
-                assert!(self.has_extra());
-                assert_eq!(count, 0);
+        let ty = self.ty.to_ty(ccx.tcx());
+        match ty.sty {
+            ty::TyArray(_, n) => {
+                common::C_usize(ccx, n.val.to_const_int().unwrap().to_u64().unwrap())
+            }
+            ty::TySlice(_) | ty::TyStr => {
+                assert!(self.llextra != ptr::null_mut());
                 self.llextra
-            } else {
-                C_usize(ccx, count)
             }
-        } else {
-            bug!("unexpected layout `{:#?}` in LvalueRef::len", self.layout)
+            _ => bug!("unexpected type `{}` in LvalueRef::len", ty)
         }
     }
 
@@ -125,132 +121,53 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
         !self.llextra.is_null()
     }
 
-    pub fn load(&self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
-        debug!("LvalueRef::load: {:?}", self);
-
-        assert!(!self.has_extra());
-
-        if self.layout.is_zst() {
-            return OperandRef::new_zst(bcx.ccx, self.layout);
-        }
-
-        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
-            let (min, max) = (scalar.valid_range.start, scalar.valid_range.end);
-            let max_next = max.wrapping_add(1);
-            let bits = scalar.value.size(bcx.ccx).bits();
-            assert!(bits <= 128);
-            let mask = !0u128 >> (128 - bits);
-            // For a (max) value of -1, max will be `-1 as usize`, which overflows.
-            // However, that is fine here (it would still represent the full range),
-            // i.e., if the range is everything.  The lo==hi case would be
-            // rejected by the LLVM verifier (it would mean either an
-            // empty set, which is impossible, or the entire range of the
-            // type, which is pointless).
-            match scalar.value {
-                layout::Int(..) if max_next & mask != min & mask => {
-                    // llvm::ConstantRange can deal with ranges that wrap around,
-                    // so an overflow on (max + 1) is fine.
-                    bcx.range_metadata(load, min..max_next);
-                }
-                layout::Pointer if 0 < min && min < max => {
-                    bcx.nonnull_metadata(load);
-                }
-                _ => {}
-            }
-        };
+    fn struct_field_ptr(
+        self,
+        bcx: &Builder<'a, 'tcx>,
+        st: &layout::Struct,
+        fields: &Vec<Ty<'tcx>>,
+        ix: usize,
+        needs_cast: bool
+    ) -> (ValueRef, Alignment) {
+        let fty = fields[ix];
+        let ccx = bcx.ccx;
 
-        let val = if self.layout.is_llvm_immediate() {
-            let mut const_llval = ptr::null_mut();
-            unsafe {
-                let global = llvm::LLVMIsAGlobalVariable(self.llval);
-                if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
-                    const_llval = llvm::LLVMGetInitializer(global);
-                }
-            }
+        let alignment = self.alignment | Alignment::from_packed(st.packed);
 
-            let llval = if !const_llval.is_null() {
-                const_llval
-            } else {
-                let load = bcx.load(self.llval, self.alignment.non_abi());
-                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
-                    scalar_load_metadata(load, scalar);
-                }
-                load
-            };
-            OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
-        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-            let load = |i, scalar: &layout::Scalar| {
-                let mut llptr = bcx.struct_gep(self.llval, i as u64);
-                // Make sure to always load i1 as i8.
-                if scalar.is_bool() {
-                    llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
-                }
-                let load = bcx.load(llptr, self.alignment.non_abi());
-                scalar_load_metadata(load, scalar);
-                if scalar.is_bool() {
-                    bcx.trunc(load, Type::i1(bcx.ccx))
-                } else {
-                    load
-                }
-            };
-            OperandValue::Pair(load(0, a), load(1, b))
+        let llfields = adt::struct_llfields(ccx, fields, st);
+        let ptr_val = if needs_cast {
+            let real_ty = Type::struct_(ccx, &llfields[..], st.packed);
+            bcx.pointercast(self.llval, real_ty.ptr_to())
         } else {
-            OperandValue::Ref(self.llval, self.alignment)
-        };
-
-        OperandRef { val, layout: self.layout }
-    }
-
-    /// Access a field, at a point when the value's case is known.
-    pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> {
-        let ccx = bcx.ccx;
-        let field = self.layout.field(ccx, ix);
-        let offset = self.layout.fields.offset(ix);
-        let alignment = self.alignment | Alignment::from(self.layout);
-
-        let simple = || {
-            // Unions and newtypes only use an offset of 0.
-            let llval = if offset.bytes() == 0 {
-                self.llval
-            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
-                // Offsets have to match either first or second field.
-                assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx)));
-                bcx.struct_gep(self.llval, 1)
-            } else {
-                bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
-            };
-            LvalueRef {
-                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()),
-                llextra: if ccx.shared().type_has_metadata(field.ty) {
-                    self.llextra
-                } else {
-                    ptr::null_mut()
-                },
-                layout: field,
-                alignment,
-            }
+            self.llval
         };
 
         // Simple case - we can just GEP the field
+        //   * First field - Always aligned properly
         //   * Packed struct - There is no alignment padding
         //   * Field is sized - pointer is properly aligned already
-        if self.layout.is_packed() || !field.is_unsized() {
-            return simple();
+        if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
+            bcx.ccx.shared().type_is_sized(fty)
+        {
+            return (bcx.struct_gep(
+                    ptr_val, adt::struct_llfields_index(st, ix)), alignment);
         }
 
         // If the type of the last field is [T], str or a foreign type, then we don't need to do
         // any adjustments
-        match field.ty.sty {
-            ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => return simple(),
+        match fty.sty {
+            ty::TySlice(..) | ty::TyStr | ty::TyForeign(..) => {
+                return (bcx.struct_gep(
+                        ptr_val, adt::struct_llfields_index(st, ix)), alignment);
+            }
             _ => ()
         }
 
         // There's no metadata available, log the case and just do the GEP.
         if !self.has_extra() {
             debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
-                ix, Value(self.llval));
-            return simple();
+                ix, Value(ptr_val));
+            return (bcx.struct_gep(ptr_val, adt::struct_llfields_index(st, ix)), alignment);
         }
 
         // We need to get the pointer manually now.
@@ -270,10 +187,12 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
 
         let meta = self.llextra;
 
-        let unaligned_offset = C_usize(ccx, offset.bytes());
+        let offset = st.offsets[ix].bytes();
+        let unaligned_offset = C_usize(bcx.ccx, offset);
 
         // Get the alignment of the field
-        let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
+        let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);
 
         // Bump the unaligned offset up to the appropriate alignment using the
         // following expression:
@@ -281,166 +200,89 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
         //   (unaligned offset + (align - 1)) & -align
 
         // Calculate offset
-        let align_sub_1 = bcx.sub(align, C_usize(ccx, 1u64));
+        let align_sub_1 = bcx.sub(align, C_usize(bcx.ccx, 1));
         let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
         bcx.neg(align));
 
         debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
 
         // Cast and adjust pointer
-        let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx));
+        let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
         let byte_ptr = bcx.gep(byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected
-        let ll_fty = field.llvm_type(ccx);
+        let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
         debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-
-        LvalueRef {
-            llval: bcx.pointercast(byte_ptr, ll_fty.ptr_to()),
-            llextra: self.llextra,
-            layout: field,
-            alignment,
-        }
+        (bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment)
     }
 
-    /// Obtain the actual discriminant of a value.
-    pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef {
-        let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx);
-        match self.layout.variants {
-            layout::Variants::Single { index } => {
-                return C_uint(cast_to, index as u64);
+    /// Access a field, at a point when the value's case is known.
+    pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) {
+        let discr = match self.ty {
+            LvalueTy::Ty { .. } => 0,
+            LvalueTy::Downcast { variant_index, .. } => variant_index,
+        };
+        let t = self.ty.to_ty(bcx.tcx());
+        let l = bcx.ccx.layout_of(t);
+        // Note: if this ever needs to generate conditionals (e.g., if we
+        // decide to do some kind of cdr-coding-like non-unique repr
+        // someday), it will need to return a possibly-new bcx as well.
+        match *l {
+            layout::Univariant { ref variant, .. } => {
+                assert_eq!(discr, 0);
+                self.struct_field_ptr(bcx, &variant,
+                    &adt::compute_fields(bcx.ccx, t, 0, false), ix, false)
             }
-            layout::Variants::Tagged { .. } |
-            layout::Variants::NicheFilling { .. } => {},
-        }
-
-        let discr = self.project_field(bcx, 0);
-        let lldiscr = discr.load(bcx).immediate();
-        match self.layout.variants {
-            layout::Variants::Single { .. } => bug!(),
-            layout::Variants::Tagged { ref discr, .. } => {
-                let signed = match discr.value {
-                    layout::Int(_, signed) => signed,
-                    _ => false
-                };
-                bcx.intcast(lldiscr, cast_to, signed)
+            layout::Vector { count, .. } => {
+                assert_eq!(discr, 0);
+                assert!((ix as u64) < count);
+                (bcx.struct_gep(self.llval, ix), self.alignment)
             }
-            layout::Variants::NicheFilling {
-                dataful_variant,
-                ref niche_variants,
-                niche_start,
-                ..
-            } => {
-                let niche_llty = discr.layout.immediate_llvm_type(bcx.ccx);
-                if niche_variants.start == niche_variants.end {
-                    // FIXME(eddyb) Check the actual primitive type here.
-                    let niche_llval = if niche_start == 0 {
-                        // HACK(eddyb) Using `C_null` as it works on all types.
-                        C_null(niche_llty)
-                    } else {
-                        C_uint_big(niche_llty, niche_start)
-                    };
-                    bcx.select(bcx.icmp(llvm::IntEQ, lldiscr, niche_llval),
-                        C_uint(cast_to, niche_variants.start as u64),
-                        C_uint(cast_to, dataful_variant as u64))
-                } else {
-                    // Rebase from niche values to discriminant values.
-                    let delta = niche_start.wrapping_sub(niche_variants.start as u128);
-                    let lldiscr = bcx.sub(lldiscr, C_uint_big(niche_llty, delta));
-                    let lldiscr_max = C_uint(niche_llty, niche_variants.end as u64);
-                    bcx.select(bcx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
-                        bcx.intcast(lldiscr, cast_to, false),
-                        C_uint(cast_to, dataful_variant as u64))
-                }
+            layout::General { discr: d, ref variants, .. } => {
+                let mut fields = adt::compute_fields(bcx.ccx, t, discr, false);
+                fields.insert(0, d.to_ty(&bcx.tcx(), false));
+                self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true)
             }
-        }
-    }
-
-    /// Set the discriminant for a new value of the given case of the given
-    /// representation.
-    pub fn trans_set_discr(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize) {
-        match self.layout.variants {
-            layout::Variants::Single { index } => {
-                if index != variant_index {
-                    // If the layout of an enum is `Single`, all
-                    // other variants are necessarily uninhabited.
-                    assert_eq!(self.layout.for_variant(bcx.ccx, variant_index).abi,
-                               layout::Abi::Uninhabited);
-                }
+            layout::UntaggedUnion { ref variants } => {
+                let fields = adt::compute_fields(bcx.ccx, t, 0, false);
+                let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
+                (bcx.pointercast(self.llval, ty.ptr_to()),
+                 self.alignment | Alignment::from_packed(variants.packed))
             }
-            layout::Variants::Tagged { .. } => {
-                let ptr = self.project_field(bcx, 0);
-                let to = self.layout.ty.ty_adt_def().unwrap()
-                    .discriminant_for_variant(bcx.tcx(), variant_index)
-                    .to_u128_unchecked() as u64;
-                bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64),
-                    ptr.llval, ptr.alignment.non_abi());
+            layout::RawNullablePointer { nndiscr, .. } |
+            layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => {
+                let nullfields = adt::compute_fields(bcx.ccx, t, (1 - nndiscr) as usize, false);
+                // The unit-like case might have a nonzero number of unit-like fields.
+                // (e.g., Result or Either with () as one side.)
+                let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
+                assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
+                (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed)
             }
-            layout::Variants::NicheFilling {
-                dataful_variant,
-                ref niche_variants,
-                niche_start,
-                ..
-            } => {
-                if variant_index != dataful_variant {
-                    if bcx.sess().target.target.arch == "arm" ||
-                       bcx.sess().target.target.arch == "aarch64" {
-                        // Issue #34427: As workaround for LLVM bug on ARM,
-                        // use memset of 0 before assigning niche value.
-                        let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
-                        let fill_byte = C_u8(bcx.ccx, 0);
-                        let (size, align) = self.layout.size_and_align();
-                        let size = C_usize(bcx.ccx, size.bytes());
-                        let align = C_u32(bcx.ccx, align.abi() as u32);
-                        base::call_memset(bcx, llptr, fill_byte, size, align, false);
-                    }
-
-                    let niche = self.project_field(bcx, 0);
-                    let niche_llty = niche.layout.immediate_llvm_type(bcx.ccx);
-                    let niche_value = ((variant_index - niche_variants.start) as u128)
-                        .wrapping_add(niche_start);
-                    // FIXME(eddyb) Check the actual primitive type here.
-                    let niche_llval = if niche_value == 0 {
-                        // HACK(eddyb) Using `C_null` as it works on all types.
-                        C_null(niche_llty)
-                    } else {
-                        C_uint_big(niche_llty, niche_value)
-                    };
-                    OperandValue::Immediate(niche_llval).store(bcx, niche);
-                }
+            layout::RawNullablePointer { nndiscr, .. } => {
+                let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
+                assert_eq!(ix, 0);
+                assert_eq!(discr as u64, nndiscr);
+                let ty = type_of::type_of(bcx.ccx, nnty);
+                (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment)
             }
+            layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
+                assert_eq!(discr as u64, nndiscr);
+                self.struct_field_ptr(bcx, &nonnull,
+                     &adt::compute_fields(bcx.ccx, t, discr, false), ix, false)
+            }
+            _ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
         }
     }
 
-    pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef)
-                         -> LvalueRef<'tcx> {
-        LvalueRef {
-            llval: bcx.inbounds_gep(self.llval, &[C_usize(bcx.ccx, 0), llindex]),
-            llextra: ptr::null_mut(),
-            layout: self.layout.field(bcx.ccx, 0),
-            alignment: self.alignment
+    pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
+        if let ty::TySlice(_) = self.ty.to_ty(bcx.tcx()).sty {
+            // Slices already point to the array element type.
+            bcx.inbounds_gep(self.llval, &[llindex])
+        } else {
+            let zero = common::C_usize(bcx.ccx, 0);
+            bcx.inbounds_gep(self.llval, &[zero, llindex])
         }
     }
-
-    pub fn project_downcast(&self, bcx: &Builder<'a, 'tcx>, variant_index: usize)
-                            -> LvalueRef<'tcx> {
-        let mut downcast = *self;
-        downcast.layout = self.layout.for_variant(bcx.ccx, variant_index);
-
-        // Cast to the appropriate variant struct type.
-        let variant_ty = downcast.layout.llvm_type(bcx.ccx);
-        downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
-
-        downcast
-    }
-
-    pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
-        bcx.lifetime_start(self.llval, self.layout.size);
-    }
-
-    pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
-        bcx.lifetime_end(self.llval, self.layout.size);
-    }
 }
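The DST field-offset computation above rounds an unaligned offset up with `(unaligned offset + (align - 1)) & -align`. A worked standalone version of that bit trick, assuming `align` is a power of two:

fn round_up(offset: u64, align: u64) -> u64 {
    debug_assert!(align.is_power_of_two());
    // Adding align - 1 bumps any non-multiple past the next boundary;
    // masking with -align (all ones except the low bits) snaps back down.
    (offset + (align - 1)) & align.wrapping_neg()
}

fn main() {
    assert_eq!(round_up(13, 8), 16); // 13 rounds up to the next multiple of 8
    assert_eq!(round_up(16, 8), 16); // already aligned: unchanged
    assert_eq!(round_up(0, 8), 0);
}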
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
@@ -468,7 +310,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             mir::Lvalue::Local(_) => bug!(), // handled above
             mir::Lvalue::Static(box mir::Static { def_id, ty }) => {
                 LvalueRef::new_sized(consts::get_static(ccx, def_id),
-                                     ccx.layout_of(self.monomorphize(&ty)),
+                                     LvalueTy::from_ty(self.monomorphize(&ty)),
                                      Alignment::AbiAligned)
             },
             mir::Lvalue::Projection(box mir::Projection {
@@ -476,27 +318,37 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 elem: mir::ProjectionElem::Deref
             }) => {
                 // Load the pointer from its location.
-                self.trans_consume(bcx, base).deref(bcx.ccx)
+                self.trans_consume(bcx, base).deref()
             }
             mir::Lvalue::Projection(ref projection) => {
                 let tr_base = self.trans_lvalue(bcx, &projection.base);
+                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
+                let projected_ty = self.monomorphize(&projected_ty);
+                let align = tr_base.alignment;
 
-                match projection.elem {
+                let ((llprojected, align), llextra) = match projection.elem {
                     mir::ProjectionElem::Deref => bug!(),
                     mir::ProjectionElem::Field(ref field, _) => {
-                        tr_base.project_field(bcx, field.index())
+                        let has_metadata = self.ccx.shared()
+                            .type_has_metadata(projected_ty.to_ty(tcx));
+                        let llextra = if !has_metadata {
+                            ptr::null_mut()
+                        } else {
+                            tr_base.llextra
+                        };
+                        (tr_base.trans_field_ptr(bcx, field.index()), llextra)
                     }
                     mir::ProjectionElem::Index(index) => {
                         let index = &mir::Operand::Consume(mir::Lvalue::Local(index));
                         let index = self.trans_operand(bcx, index);
-                        let llindex = index.immediate();
-                        tr_base.project_index(bcx, llindex)
+                        let llindex = self.prepare_index(bcx, index.immediate());
+                        ((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
                     }
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: false,
                                                          min_length: _ } => {
                         let lloffset = C_usize(bcx.ccx, offset as u64);
-                        tr_base.project_index(bcx, lloffset)
+                        ((tr_base.project_index(bcx, lloffset), align), ptr::null_mut())
                     }
                     mir::ProjectionElem::ConstantIndex { offset,
                                                          from_end: true,
@@ -504,31 +356,39 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         let lloffset = C_usize(bcx.ccx, offset as u64);
                         let lllen = tr_base.len(bcx.ccx);
                         let llindex = bcx.sub(lllen, lloffset);
-                        tr_base.project_index(bcx, llindex)
+                        ((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
                     }
                     mir::ProjectionElem::Subslice { from, to } => {
-                        let mut subslice = tr_base.project_index(bcx,
-                            C_usize(bcx.ccx, from as u64));
-                        let projected_ty = LvalueTy::Ty { ty: tr_base.layout.ty }
-                            .projection_ty(tcx, &projection.elem).to_ty(bcx.tcx());
-                        subslice.layout = bcx.ccx.layout_of(self.monomorphize(&projected_ty));
-
-                        if subslice.layout.is_unsized() {
-                            assert!(tr_base.has_extra());
-                            subslice.llextra = bcx.sub(tr_base.llextra,
-                                C_usize(bcx.ccx, (from as u64) + (to as u64)));
+                        let llbase = tr_base.project_index(bcx, C_usize(bcx.ccx, from as u64));
+
+                        let base_ty = tr_base.ty.to_ty(bcx.tcx());
+                        match base_ty.sty {
+                            ty::TyArray(..) => {
+                                // must cast the lvalue pointer type to the new
+                                // array type (*[%_; new_len]).
+                                let base_ty = self.monomorphized_lvalue_ty(lvalue);
+                                let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to();
+                                let llbase = bcx.pointercast(llbase, llbasety);
+                                ((llbase, align), ptr::null_mut())
+                            }
+                            ty::TySlice(..) => {
+                                assert!(tr_base.llextra != ptr::null_mut());
+                                let lllen = bcx.sub(tr_base.llextra,
+                                                    C_usize(bcx.ccx, (from as u64) + (to as u64)));
+                                ((llbase, align), lllen)
+                            }
+                            _ => bug!("unexpected type {:?} in Subslice", base_ty)
                         }
-
-                        // Cast the lvalue pointer type to the new
-                        // array or slice type (*[%_; new_len]).
-                        subslice.llval = bcx.pointercast(subslice.llval,
-                            subslice.layout.llvm_type(bcx.ccx).ptr_to());
-
-                        subslice
                     }
-                    mir::ProjectionElem::Downcast(_, v) => {
-                        tr_base.project_downcast(bcx, v)
+                    mir::ProjectionElem::Downcast(..) => {
+                        ((tr_base.llval, align), tr_base.llextra)
                     }
+                };
+                LvalueRef {
+                    llval: llprojected,
+                    llextra,
+                    ty: projected_ty,
+                    alignment: align,
                 }
             }
         };
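
The Subslice arm restored above recomputes the slice's length word directly: dropping `from` leading and `to` trailing elements leaves `old_len - (from + to)`, which is exactly what the `bcx.sub` on `tr_base.llextra` computes. A minimal standalone sketch of that arithmetic, with `subslice_len` as a hypothetical stand-in for the LLVM-level subtraction:

    // Length word for a subslice projection; `subslice_len` is
    // illustrative only, not a rustc_trans helper.
    fn subslice_len(old_len: u64, from: u64, to: u64) -> u64 {
        old_len - (from + to)
    }

    fn main() {
        // A pattern like `[_, rest.., _]` on a 5-element slice leaves
        // 3 elements in `rest`.
        assert_eq!(subslice_len(5, 1, 1), 3);
    }
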
@@ -536,6 +396,22 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         result
     }
 
+    /// Adjust the bitwidth of an index since LLVM is less forgiving
+    /// than we are.
+    ///
+    /// nmatsakis: is this still necessary? Not sure.
+    fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
+        let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex));
+        let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.isize_ty());
+        if index_size < int_size {
+            bcx.zext(llindex, bcx.ccx.isize_ty())
+        } else if index_size > int_size {
+            bcx.trunc(llindex, bcx.ccx.isize_ty())
+        } else {
+            llindex
+        }
+    }
+
     pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
         let tcx = self.ccx.tcx();
         let lvalue_ty = lvalue.ty(self.mir, tcx);
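
`prepare_index` above only has to choose between a zero-extension, a truncation, or nothing at all, based on how the index's bit width compares to the target's pointer width. A self-contained sketch of that decision on plain bit widths (the real code calls `bcx.zext`/`bcx.trunc` on LLVM `ValueRef`s; `normalize_index` is hypothetical):

    // Model of the `prepare_index` decision: widen, narrow, or keep.
    fn normalize_index(index_bits: u32, ptr_bits: u32) -> &'static str {
        if index_bits < ptr_bits {
            "zext"  // widen; zero-extension is correct for unsigned indices
        } else if index_bits > ptr_bits {
            "trunc" // narrow; a valid index always fits the pointer width
        } else {
            "as-is" // already isize-sized
        }
    }

    fn main() {
        // On a 64-bit target, a u8 index is zero-extended to i64.
        assert_eq!(normalize_index(8, 64), "zext");
        assert_eq!(normalize_index(128, 64), "trunc");
        assert_eq!(normalize_index(64, 64), "as-is");
    }
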
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 7f3a430c418..59da80035fd 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -11,18 +11,20 @@
 use libc::c_uint;
 use llvm::{self, ValueRef, BasicBlockRef};
 use llvm::debuginfo::DIScope;
-use rustc::ty::{self, TypeFoldable};
-use rustc::ty::layout::{LayoutOf, TyLayout};
+use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::mir::{self, Mir};
+use rustc::mir::tcx::LvalueTy;
 use rustc::ty::subst::Substs;
 use rustc::infer::TransNormalize;
 use rustc::session::config::FullDebugInfo;
 use base;
 use builder::Builder;
-use common::{CrateContext, Funclet};
+use common::{self, CrateContext, Funclet};
 use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
 use monomorphize::Instance;
-use abi::{ArgAttribute, FnType, PassMode};
+use abi::{ArgAttribute, FnType};
+use type_of;
 
 use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
 use syntax::symbol::keywords;
@@ -59,7 +61,7 @@ pub struct MirContext<'a, 'tcx:'a> {
     /// don't really care about it very much. Anyway, this value
     /// contains an alloca into which the personality is stored and
     /// then later loaded when generating the DIVERGE_BLOCK.
-    personality_slot: Option<LvalueRef<'tcx>>,
+    llpersonalityslot: Option<ValueRef>,
 
     /// A `Block` for each MIR `BasicBlock`
     blocks: IndexVec<mir::BasicBlock, BasicBlockRef>,
@@ -84,7 +86,7 @@ pub struct MirContext<'a, 'tcx:'a> {
     /// directly using an `OperandRef`, which makes for tighter LLVM
     /// IR. The conditions for using an `OperandRef` are as follows:
     ///
-    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
+    /// - the type of the local must be judged "immediate" by `type_is_immediate`
     /// - the operand must never be referenced indirectly
     ///     - we should not take its address using the `&` operator
     ///     - nor should it appear in an lvalue path like `tmp.a`
@@ -175,13 +177,14 @@ enum LocalRef<'tcx> {
     Operand(Option<OperandRef<'tcx>>),
 }
 
-impl<'a, 'tcx> LocalRef<'tcx> {
-    fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> {
-        if layout.is_zst() {
+impl<'tcx> LocalRef<'tcx> {
+    fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>,
+                       ty: Ty<'tcx>) -> LocalRef<'tcx> {
+        if common::type_is_zero_size(ccx, ty) {
             // Zero-size temporaries aren't always initialized, which
             // doesn't matter because they don't contain data, but
             // we need something in the operand.
-            LocalRef::Operand(Some(OperandRef::new_zst(ccx, layout)))
+            LocalRef::Operand(Some(OperandRef::new_zst(ccx, ty)))
         } else {
             LocalRef::Operand(None)
         }
@@ -229,7 +232,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
         llfn,
         fn_ty,
         ccx,
-        personality_slot: None,
+        llpersonalityslot: None,
         blocks: block_bcxs,
         unreachable_block: None,
         cleanup_kinds,
@@ -252,8 +255,7 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 
         let mut allocate_local = |local| {
             let decl = &mir.local_decls[local];
-            let layout = bcx.ccx.layout_of(mircx.monomorphize(&decl.ty));
-            assert!(!layout.ty.has_erasable_regions());
+            let ty = mircx.monomorphize(&decl.ty);
 
             if let Some(name) = decl.name {
                 // User variable
@@ -262,14 +264,15 @@ pub fn trans_mir<'a, 'tcx: 'a>(
 
                 if !lvalue_locals.contains(local.index()) && !dbg {
                     debug!("alloc: {:?} ({}) -> operand", local, name);
-                    return LocalRef::new_operand(bcx.ccx, layout);
+                    return LocalRef::new_operand(bcx.ccx, ty);
                 }
 
                 debug!("alloc: {:?} ({}) -> lvalue", local, name);
-                let lvalue = LvalueRef::alloca(&bcx, layout, &name.as_str());
+                assert!(!ty.has_erasable_regions());
+                let lvalue = LvalueRef::alloca(&bcx, ty, &name.as_str());
                 if dbg {
                     let (scope, span) = mircx.debug_loc(decl.source_info);
-                    declare_local(&bcx, &mircx.debug_context, name, layout.ty, scope,
+                    declare_local(&bcx, &mircx.debug_context, name, ty, scope,
                         VariableAccess::DirectVariable { alloca: lvalue.llval },
                         VariableKind::LocalVariable, span);
                 }
@@ -279,18 +282,18 @@ pub fn trans_mir<'a, 'tcx: 'a>(
                 if local == mir::RETURN_POINTER && mircx.fn_ty.ret.is_indirect() {
                     debug!("alloc: {:?} (return pointer) -> lvalue", local);
                     let llretptr = llvm::get_param(llfn, 0);
-                    LocalRef::Lvalue(LvalueRef::new_sized(llretptr,
-                                                          layout,
+                    LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty),
                                                           Alignment::AbiAligned))
                 } else if lvalue_locals.contains(local.index()) {
                     debug!("alloc: {:?} -> lvalue", local);
-                    LocalRef::Lvalue(LvalueRef::alloca(&bcx, layout, &format!("{:?}", local)))
+                    assert!(!ty.has_erasable_regions());
+                    LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", local)))
                 } else {
                     // If this is an immediate local, we do not create an
                     // alloca in advance. Instead we wait until we see the
                     // definition and update the operand there.
                     debug!("alloc: {:?} -> operand", local);
-                    LocalRef::new_operand(bcx.ccx, layout)
+                    LocalRef::new_operand(bcx.ccx, ty)
                 }
             }
         };
@@ -381,6 +384,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 
     mir.args_iter().enumerate().map(|(arg_index, local)| {
         let arg_decl = &mir.local_decls[local];
+        let arg_ty = mircx.monomorphize(&arg_decl.ty);
 
         let name = if let Some(name) = arg_decl.name {
             name.as_str().to_string()
@@ -394,17 +398,26 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             // to reconstruct it into a tuple local variable, from multiple
             // individual LLVM function arguments.
 
-            let arg_ty = mircx.monomorphize(&arg_decl.ty);
             let tupled_arg_tys = match arg_ty.sty {
                 ty::TyTuple(ref tys, _) => tys,
                 _ => bug!("spread argument isn't a tuple?!")
             };
 
-            let lvalue = LvalueRef::alloca(bcx, bcx.ccx.layout_of(arg_ty), &name);
-            for i in 0..tupled_arg_tys.len() {
+            let lvalue = LvalueRef::alloca(bcx, arg_ty, &name);
+            for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
+                let (dst, _) = lvalue.trans_field_ptr(bcx, i);
                 let arg = &mircx.fn_ty.args[idx];
                 idx += 1;
-                arg.store_fn_arg(bcx, &mut llarg_idx, lvalue.project_field(bcx, i));
+                if common::type_is_fat_ptr(bcx.ccx, tupled_arg_ty) {
+                    // We pass fat pointers as two words, but inside the tuple
+                    // they are the two sub-fields of a single aggregate field.
+                    let meta = &mircx.fn_ty.args[idx];
+                    idx += 1;
+                    arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, dst));
+                    meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, dst));
+                } else {
+                    arg.store_fn_arg(bcx, &mut llarg_idx, dst);
+                }
             }
 
             // Now that we have one alloca that contains the aggregate value,
@@ -429,56 +442,82 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
 
         let arg = &mircx.fn_ty.args[idx];
         idx += 1;
-        if arg.pad.is_some() {
+        let llval = if arg.is_indirect() {
+            // Don't copy an indirect argument to an alloca, the caller
+            // Don't copy an indirect argument to an alloca; the caller
+            // already put it in a temporary alloca and gave it up.
+            if arg.pad.is_some() {
+                llarg_idx += 1;
+            }
+            let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
+            bcx.set_value_name(llarg, &name);
             llarg_idx += 1;
-        }
+            llarg
+        } else if !lvalue_locals.contains(local.index()) &&
+                  arg.cast.is_none() && arg_scope.is_none() {
+            if arg.is_ignore() {
+                return LocalRef::new_operand(bcx.ccx, arg_ty);
+            }
 
-        if arg_scope.is_none() && !lvalue_locals.contains(local.index()) {
             // We don't have to cast or keep the argument in the alloca.
             // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
             // of putting everything in allocas just so we can use llvm.dbg.declare.
-            let local = |op| LocalRef::Operand(Some(op));
-            match arg.mode {
-                PassMode::Ignore => {
-                    return local(OperandRef::new_zst(bcx.ccx, arg.layout));
-                }
-                PassMode::Direct(_) => {
-                    let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(llarg, &name);
-                    llarg_idx += 1;
-                    return local(
-                        OperandRef::from_immediate_or_packed_pair(bcx, llarg, arg.layout));
-                }
-                PassMode::Pair(..) => {
-                    let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(a, &(name.clone() + ".0"));
-                    llarg_idx += 1;
-
-                    let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-                    bcx.set_value_name(b, &(name + ".1"));
-                    llarg_idx += 1;
-
-                    return local(OperandRef {
-                        val: OperandValue::Pair(a, b),
-                        layout: arg.layout
-                    });
-                }
-                _ => {}
+            if arg.pad.is_some() {
+                llarg_idx += 1;
             }
-        }
-
-        let lvalue = if arg.is_indirect() {
-            // Don't copy an indirect argument to an alloca, the caller
-            // already put it in a temporary alloca and gave it up.
-            // FIXME: lifetimes
             let llarg = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
-            bcx.set_value_name(llarg, &name);
             llarg_idx += 1;
-            LvalueRef::new_sized(llarg, arg.layout, Alignment::AbiAligned)
+            let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
+                let meta = &mircx.fn_ty.args[idx];
+                idx += 1;
+                assert_eq!((meta.cast, meta.pad), (None, None));
+                let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
+                llarg_idx += 1;
+
+                // FIXME(eddyb) As we can't perfectly represent the data and/or
+                // vtable pointer of a fat pointer in Rust's typesystem, and
+                // because we split fat pointers into two ArgType's, they're
+                // not the right type, so we have to cast them for now.
+                let pointee = match arg_ty.sty {
+                    ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+                    ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty,
+                    ty::TyAdt(def, _) if def.is_box() => arg_ty.boxed_ty(),
+                    _ => bug!()
+                };
+                let data_llty = type_of::in_memory_type_of(bcx.ccx, pointee);
+                let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee);
+
+                let llarg = bcx.pointercast(llarg, data_llty.ptr_to());
+                bcx.set_value_name(llarg, &(name.clone() + ".ptr"));
+                let llmeta = bcx.pointercast(llmeta, meta_llty);
+                bcx.set_value_name(llmeta, &(name + ".meta"));
+
+                OperandValue::Pair(llarg, llmeta)
+            } else {
+                bcx.set_value_name(llarg, &name);
+                OperandValue::Immediate(llarg)
+            };
+            let operand = OperandRef {
+                val,
+                ty: arg_ty
+            };
+            return LocalRef::Operand(Some(operand.unpack_if_pair(bcx)));
         } else {
-            let tmp = LvalueRef::alloca(bcx, arg.layout, &name);
-            arg.store_fn_arg(bcx, &mut llarg_idx, tmp);
-            tmp
+            let lltemp = LvalueRef::alloca(bcx, arg_ty, &name);
+            if common::type_is_fat_ptr(bcx.ccx, arg_ty) {
+                // we pass fat pointers as two words, but we want to
+                // represent them internally as a pointer to two words,
+                // so make an alloca to store them in.
+                let meta = &mircx.fn_ty.args[idx];
+                idx += 1;
+                arg.store_fn_arg(bcx, &mut llarg_idx, base::get_dataptr(bcx, lltemp.llval));
+                meta.store_fn_arg(bcx, &mut llarg_idx, base::get_meta(bcx, lltemp.llval));
+            } else {
+                // otherwise, arg is passed by value, so make a
+                // temporary and store it there
+                arg.store_fn_arg(bcx, &mut llarg_idx, lltemp.llval);
+            }
+            lltemp.llval
         };
         arg_scope.map(|scope| {
             // Is this a regular argument?
@@ -486,24 +525,21 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 // The Rust ABI passes indirect variables using a pointer and a manual copy, so we
                 // need to insert a deref here, but the C ABI uses a pointer and a copy using the
                 // byval attribute, for which LLVM does the deref itself, so we must not add it.
-                let mut variable_access = VariableAccess::DirectVariable {
-                    alloca: lvalue.llval
-                };
-
-                if let PassMode::Indirect(ref attrs) = arg.mode {
-                    if !attrs.contains(ArgAttribute::ByVal) {
-                        variable_access = VariableAccess::IndirectVariable {
-                            alloca: lvalue.llval,
-                            address_operations: &deref_op,
-                        };
+                let variable_access = if arg.is_indirect() &&
+                    !arg.attrs.contains(ArgAttribute::ByVal) {
+                    VariableAccess::IndirectVariable {
+                        alloca: llval,
+                        address_operations: &deref_op,
                     }
-                }
+                } else {
+                    VariableAccess::DirectVariable { alloca: llval }
+                };
 
                 declare_local(
                     bcx,
                     &mircx.debug_context,
                     arg_decl.name.unwrap_or(keywords::Invalid.name()),
-                    arg.layout.ty,
+                    arg_ty,
                     scope,
                     variable_access,
                     VariableKind::ArgumentVariable(arg_index + 1),
@@ -513,15 +549,15 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             }
 
             // Or is it the closure environment?
-            let (closure_layout, env_ref) = match arg.layout.ty.sty {
-                ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (bcx.ccx.layout_of(mt.ty), true),
-                _ => (arg.layout, false)
+            let (closure_ty, env_ref) = match arg_ty.sty {
+                ty::TyRef(_, mt) | ty::TyRawPtr(mt) => (mt.ty, true),
+                _ => (arg_ty, false)
             };
 
-            let upvar_tys = match closure_layout.ty.sty {
+            let upvar_tys = match closure_ty.sty {
                 ty::TyClosure(def_id, substs) |
                 ty::TyGenerator(def_id, substs, _) => substs.upvar_tys(def_id, tcx),
-                _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_layout.ty)
+                _ => bug!("upvar_decls with non-closure arg0 type `{}`", closure_ty)
             };
 
             // Store the pointer to closure data in an alloca for debuginfo
@@ -532,17 +568,21 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             // doesn't actually strip the offset when splitting the closure
             // environment into its components so it ends up out of bounds.
             let env_ptr = if !env_ref {
-                let alloc = LvalueRef::alloca(bcx,
-                    bcx.ccx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
-                    "__debuginfo_env_ptr");
-                bcx.store(lvalue.llval, alloc.llval, None);
-                alloc.llval
+                let alloc = bcx.alloca(common::val_ty(llval), "__debuginfo_env_ptr", None);
+                bcx.store(llval, alloc, None);
+                alloc
             } else {
-                lvalue.llval
+                llval
+            };
+
+            let layout = bcx.ccx.layout_of(closure_ty);
+            let offsets = match *layout {
+                layout::Univariant { ref variant, .. } => &variant.offsets[..],
+                _ => bug!("Closures are only supposed to be Univariant")
             };
 
             for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
-                let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();
+                let byte_offset_of_var_in_env = offsets[i].bytes();
 
                 let ops = unsafe {
                     [llvm::LLVMRustDIBuilderCreateOpDeref(),
@@ -580,7 +620,8 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 );
             }
         });
-        LocalRef::Lvalue(lvalue)
+        LocalRef::Lvalue(LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty),
+                                              Alignment::AbiAligned))
     }).collect()
 }
 
@@ -588,6 +629,6 @@ mod analyze;
 mod block;
 mod constant;
 pub mod lvalue;
-pub mod operand;
+mod operand;
 mod rvalue;
 mod statement;
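
A recurring theme in the `mir/mod.rs` changes above is that fat pointers travel as two separate LLVM words (data pointer plus length or vtable) and are reassembled on the callee side via paired `store_fn_arg` calls. A hedged illustration of the same split in surface Rust, using a slice reference; the helpers are illustrative, not rustc internals:

    // A `&[u8]` is (data pointer, length); splitting and rebuilding it
    // mirrors how the two words are passed and stored separately.
    fn split_fat_ptr(s: &[u8]) -> (*const u8, usize) {
        (s.as_ptr(), s.len())
    }

    unsafe fn rebuild_fat_ptr<'a>(data: *const u8, len: usize) -> &'a [u8] {
        std::slice::from_raw_parts(data, len)
    }

    fn main() {
        let xs = [1u8, 2, 3];
        let (data, len) = split_fat_ptr(&xs);
        // Sound here: both words come from a slice that is still alive.
        let back = unsafe { rebuild_fat_ptr(data, len) };
        assert_eq!(back, &xs[..]);
    }
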
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
index 8c43bded1bf..9ce1749190b 100644
--- a/src/librustc_trans/mir/operand.rs
+++ b/src/librustc_trans/mir/operand.rs
@@ -9,16 +9,18 @@
 // except according to those terms.
 
 use llvm::ValueRef;
-use rustc::ty;
-use rustc::ty::layout::{self, LayoutOf, TyLayout};
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{Layout, LayoutTyper};
 use rustc::mir;
+use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;
 
+use adt;
 use base;
-use common::{self, CrateContext, C_undef, C_usize};
+use common::{self, CrateContext, C_null};
 use builder::Builder;
 use value::Value;
-use type_of::LayoutLlvmExt;
+use type_of;
 use type_::Type;
 
 use std::fmt;
@@ -41,52 +43,63 @@ pub enum OperandValue {
     Pair(ValueRef, ValueRef)
 }
 
-impl fmt::Debug for OperandValue {
-    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        match *self {
-            OperandValue::Ref(r, align) => {
-                write!(f, "Ref({:?}, {:?})", Value(r), align)
-            }
-            OperandValue::Immediate(i) => {
-                write!(f, "Immediate({:?})", Value(i))
-            }
-            OperandValue::Pair(a, b) => {
-                write!(f, "Pair({:?}, {:?})", Value(a), Value(b))
-            }
-        }
-    }
-}
-
 /// An `OperandRef` is an "SSA" reference to a Rust value, along with
 /// its type.
 ///
 /// NOTE: unless you know a value's type exactly, you should not
 /// generate LLVM opcodes acting on it and instead act via methods,
-/// to avoid nasty edge cases. In particular, using `Builder::store`
-/// directly is sure to cause problems -- use `OperandRef::store`
+/// to avoid nasty edge cases. In particular, using `Builder.store`
+/// directly is sure to cause problems -- use `MirContext.store_operand`
 /// instead.
 #[derive(Copy, Clone)]
 pub struct OperandRef<'tcx> {
     // The value.
     pub val: OperandValue,
 
-    // The layout of value, based on its Rust type.
-    pub layout: TyLayout<'tcx>,
+    // The type of value being returned.
+    pub ty: Ty<'tcx>
 }
 
 impl<'tcx> fmt::Debug for OperandRef<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-        write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
+        match self.val {
+            OperandValue::Ref(r, align) => {
+                write!(f, "OperandRef(Ref({:?}, {:?}) @ {:?})",
+                       Value(r), align, self.ty)
+            }
+            OperandValue::Immediate(i) => {
+                write!(f, "OperandRef(Immediate({:?}) @ {:?})",
+                       Value(i), self.ty)
+            }
+            OperandValue::Pair(a, b) => {
+                write!(f, "OperandRef(Pair({:?}, {:?}) @ {:?})",
+                       Value(a), Value(b), self.ty)
+            }
+        }
     }
 }
 
 impl<'a, 'tcx> OperandRef<'tcx> {
     pub fn new_zst(ccx: &CrateContext<'a, 'tcx>,
-                   layout: TyLayout<'tcx>) -> OperandRef<'tcx> {
-        assert!(layout.is_zst());
+                   ty: Ty<'tcx>) -> OperandRef<'tcx> {
+        assert!(common::type_is_zero_size(ccx, ty));
+        let llty = type_of::type_of(ccx, ty);
+        let val = if common::type_is_imm_pair(ccx, ty) {
+            let layout = ccx.layout_of(ty);
+            let (ix0, ix1) = if let Layout::Univariant { ref variant, .. } = *layout {
+                (adt::struct_llfields_index(variant, 0),
+                adt::struct_llfields_index(variant, 1))
+            } else {
+                (0, 1)
+            };
+            let fields = llty.field_types();
+            OperandValue::Pair(C_null(fields[ix0]), C_null(fields[ix1]))
+        } else {
+            OperandValue::Immediate(C_null(llty))
+        };
         OperandRef {
-            val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(ccx))),
-            layout
+            val,
+            ty,
         }
     }
 
@@ -99,8 +112,8 @@ impl<'a, 'tcx> OperandRef<'tcx> {
         }
     }
 
-    pub fn deref(self, ccx: &CrateContext<'a, 'tcx>) -> LvalueRef<'tcx> {
-        let projected_ty = self.layout.ty.builtin_deref(true, ty::NoPreference)
+    pub fn deref(self) -> LvalueRef<'tcx> {
+        let projected_ty = self.ty.builtin_deref(true, ty::NoPreference)
             .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
         let (llptr, llextra) = match self.val {
             OperandValue::Immediate(llptr) => (llptr, ptr::null_mut()),
@@ -110,150 +123,126 @@ impl<'a, 'tcx> OperandRef<'tcx> {
         LvalueRef {
             llval: llptr,
             llextra,
-            layout: ccx.layout_of(projected_ty),
+            ty: LvalueTy::from_ty(projected_ty),
             alignment: Alignment::AbiAligned,
         }
     }
 
-    /// If this operand is a `Pair`, we return an aggregate with the two values.
-    /// For other cases, see `immediate`.
-    pub fn immediate_or_packed_pair(self, bcx: &Builder<'a, 'tcx>) -> ValueRef {
+    /// If this operand is a Pair, we return an
+    /// Immediate aggregate with the two values.
+    pub fn pack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
         if let OperandValue::Pair(a, b) = self.val {
-            let llty = self.layout.llvm_type(bcx.ccx);
-            debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
-                   self, llty);
             // Reconstruct the immediate aggregate.
-            let mut llpair = C_undef(llty);
-            llpair = bcx.insert_value(llpair, a, 0);
-            llpair = bcx.insert_value(llpair, b, 1);
-            llpair
-        } else {
-            self.immediate()
+            let llty = type_of::type_of(bcx.ccx, self.ty);
+            let mut llpair = common::C_undef(llty);
+            let elems = [a, b];
+            for i in 0..2 {
+                let mut elem = elems[i];
+                // Extend boolean i1's to i8.
+                if common::val_ty(elem) == Type::i1(bcx.ccx) {
+                    elem = bcx.zext(elem, Type::i8(bcx.ccx));
+                }
+                let layout = bcx.ccx.layout_of(self.ty);
+                let i = if let Layout::Univariant { ref variant, .. } = *layout {
+                    adt::struct_llfields_index(variant, i)
+                } else {
+                    i
+                };
+                llpair = bcx.insert_value(llpair, elem, i);
+            }
+            self.val = OperandValue::Immediate(llpair);
         }
+        self
     }
 
-    /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
-    pub fn from_immediate_or_packed_pair(bcx: &Builder<'a, 'tcx>,
-                                         llval: ValueRef,
-                                         layout: TyLayout<'tcx>)
-                                         -> OperandRef<'tcx> {
-        let val = if layout.is_llvm_scalar_pair() {
-            debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
-                    llval, layout);
-
+    /// If this operand is a pair in an Immediate,
+    /// we return a Pair with the two halves.
+    pub fn unpack_if_pair(mut self, bcx: &Builder<'a, 'tcx>) -> OperandRef<'tcx> {
+        if let OperandValue::Immediate(llval) = self.val {
             // Deconstruct the immediate aggregate.
-            OperandValue::Pair(bcx.extract_value(llval, 0),
-                               bcx.extract_value(llval, 1))
-        } else {
-            OperandValue::Immediate(llval)
-        };
-        OperandRef { val, layout }
-    }
+            if common::type_is_imm_pair(bcx.ccx, self.ty) {
+                debug!("Operand::unpack_if_pair: unpacking {:?}", self);
 
-    pub fn extract_field(&self, bcx: &Builder<'a, 'tcx>, i: usize) -> OperandRef<'tcx> {
-        let field = self.layout.field(bcx.ccx, i);
-        let offset = self.layout.fields.offset(i);
-
-        let mut val = match (self.val, &self.layout.abi) {
-            // If we're uninhabited, or the field is ZST, it has no data.
-            _ if self.layout.abi == layout::Abi::Uninhabited || field.is_zst() => {
-                return OperandRef {
-                    val: OperandValue::Immediate(C_undef(field.immediate_llvm_type(bcx.ccx))),
-                    layout: field
+                let layout = bcx.ccx.layout_of(self.ty);
+                let (ix0, ix1) = if let Layout::Univariant { ref variant, .. } = *layout {
+                    (adt::struct_llfields_index(variant, 0),
+                    adt::struct_llfields_index(variant, 1))
+                } else {
+                    (0, 1)
                 };
-            }
 
-            // Newtype of a scalar or scalar pair.
-            (OperandValue::Immediate(_), _) |
-            (OperandValue::Pair(..), _) if field.size == self.layout.size => {
-                assert_eq!(offset.bytes(), 0);
-                self.val
-            }
+                let mut a = bcx.extract_value(llval, ix0);
+                let mut b = bcx.extract_value(llval, ix1);
 
-            // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
-                if offset.bytes() == 0 {
-                    assert_eq!(field.size, a.value.size(bcx.ccx));
-                    OperandValue::Immediate(a_llval)
-                } else {
-                    assert_eq!(offset, a.value.size(bcx.ccx)
-                        .abi_align(b.value.align(bcx.ccx)));
-                    assert_eq!(field.size, b.value.size(bcx.ccx));
-                    OperandValue::Immediate(b_llval)
+                let pair_fields = common::type_pair_fields(bcx.ccx, self.ty);
+                if let Some([a_ty, b_ty]) = pair_fields {
+                    if a_ty.is_bool() {
+                        a = bcx.trunc(a, Type::i1(bcx.ccx));
+                    }
+                    if b_ty.is_bool() {
+                        b = bcx.trunc(b, Type::i1(bcx.ccx));
+                    }
                 }
-            }
-
-            // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), &layout::Abi::Vector) => {
-                OperandValue::Immediate(
-                    bcx.extract_element(llval, C_usize(bcx.ccx, i as u64)))
-            }
-
-            _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
-        };
 
-        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-        match val {
-            OperandValue::Immediate(ref mut llval) => {
-                *llval = bcx.bitcast(*llval, field.immediate_llvm_type(bcx.ccx));
-            }
-            OperandValue::Pair(ref mut a, ref mut b) => {
-                *a = bcx.bitcast(*a, field.scalar_pair_element_llvm_type(bcx.ccx, 0));
-                *b = bcx.bitcast(*b, field.scalar_pair_element_llvm_type(bcx.ccx, 1));
+                self.val = OperandValue::Pair(a, b);
             }
-            OperandValue::Ref(..) => bug!()
-        }
-
-        OperandRef {
-            val,
-            layout: field
         }
+        self
     }
 }
 
-impl<'a, 'tcx> OperandValue {
-    pub fn store(self, bcx: &Builder<'a, 'tcx>, dest: LvalueRef<'tcx>) {
-        debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
-        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
-        // value is through `undef`, and store itself is useless.
-        if dest.layout.is_zst() {
-            return;
-        }
-        match self {
-            OperandValue::Ref(r, source_align) =>
-                base::memcpy_ty(bcx, dest.llval, r, dest.layout,
-                                (source_align | dest.alignment).non_abi()),
-            OperandValue::Immediate(s) => {
-                bcx.store(base::from_immediate(bcx, s), dest.llval, dest.alignment.non_abi());
-            }
-            OperandValue::Pair(a, b) => {
-                for (i, &x) in [a, b].iter().enumerate() {
-                    let mut llptr = bcx.struct_gep(dest.llval, i as u64);
-                    // Make sure to always store i1 as i8.
-                    if common::val_ty(x) == Type::i1(bcx.ccx) {
-                        llptr = bcx.pointercast(llptr, Type::i8p(bcx.ccx));
-                    }
-                    bcx.store(base::from_immediate(bcx, x), llptr, dest.alignment.non_abi());
-                }
-            }
-        }
+impl<'a, 'tcx> MirContext<'a, 'tcx> {
+    pub fn trans_load(&mut self,
+                      bcx: &Builder<'a, 'tcx>,
+                      llval: ValueRef,
+                      align: Alignment,
+                      ty: Ty<'tcx>)
+                      -> OperandRef<'tcx>
+    {
+        debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
+
+        let val = if common::type_is_fat_ptr(bcx.ccx, ty) {
+            let (lldata, llextra) = base::load_fat_ptr(bcx, llval, align, ty);
+            OperandValue::Pair(lldata, llextra)
+        } else if common::type_is_imm_pair(bcx.ccx, ty) {
+            let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(ty) {
+                Layout::Univariant { ref variant, .. } => {
+                    (adt::struct_llfields_index(variant, 0),
+                    adt::struct_llfields_index(variant, 1),
+                    Alignment::from_packed(variant.packed) | align)
+                },
+                _ => (0, 1, align)
+            };
+            let [a_ty, b_ty] = common::type_pair_fields(bcx.ccx, ty).unwrap();
+            let a_ptr = bcx.struct_gep(llval, ix0);
+            let b_ptr = bcx.struct_gep(llval, ix1);
+
+            OperandValue::Pair(
+                base::load_ty(bcx, a_ptr, f_align, a_ty),
+                base::load_ty(bcx, b_ptr, f_align, b_ty)
+            )
+        } else if common::type_is_immediate(bcx.ccx, ty) {
+            OperandValue::Immediate(base::load_ty(bcx, llval, align, ty))
+        } else {
+            OperandValue::Ref(llval, align)
+        };
+
+        OperandRef { val, ty }
     }
-}
 
-impl<'a, 'tcx> MirContext<'a, 'tcx> {
-    fn maybe_trans_consume_direct(&mut self,
-                                  bcx: &Builder<'a, 'tcx>,
-                                  lvalue: &mir::Lvalue<'tcx>)
-                                   -> Option<OperandRef<'tcx>>
+    pub fn trans_consume(&mut self,
+                         bcx: &Builder<'a, 'tcx>,
+                         lvalue: &mir::Lvalue<'tcx>)
+                         -> OperandRef<'tcx>
     {
-        debug!("maybe_trans_consume_direct(lvalue={:?})", lvalue);
+        debug!("trans_consume(lvalue={:?})", lvalue);
 
         // watch out for locals that do not have an
         // alloca; they are handled somewhat differently
         if let mir::Lvalue::Local(index) = *lvalue {
             match self.locals[index] {
                 LocalRef::Operand(Some(o)) => {
-                    return Some(o);
+                    return o;
                 }
                 LocalRef::Operand(None) => {
                     bug!("use of {:?} before def", lvalue);
@@ -264,40 +253,33 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             }
         }
 
-        // Moves out of scalar and scalar pair fields are trivial.
+        // Moves out of pair fields are trivial.
         if let &mir::Lvalue::Projection(ref proj) = lvalue {
-            if let mir::ProjectionElem::Field(ref f, _) = proj.elem {
-                if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
-                    return Some(o.extract_field(bcx, f.index()));
+            if let mir::Lvalue::Local(index) = proj.base {
+                if let LocalRef::Operand(Some(o)) = self.locals[index] {
+                    match (o.val, &proj.elem) {
+                        (OperandValue::Pair(a, b),
+                         &mir::ProjectionElem::Field(ref f, ty)) => {
+                            let llval = [a, b][f.index()];
+                            let op = OperandRef {
+                                val: OperandValue::Immediate(llval),
+                                ty: self.monomorphize(&ty)
+                            };
+
+                            // Handle nested pairs.
+                            return op.unpack_if_pair(bcx);
+                        }
+                        _ => {}
+                    }
                 }
             }
         }
 
-        None
-    }
-
-    pub fn trans_consume(&mut self,
-                         bcx: &Builder<'a, 'tcx>,
-                         lvalue: &mir::Lvalue<'tcx>)
-                         -> OperandRef<'tcx>
-    {
-        debug!("trans_consume(lvalue={:?})", lvalue);
-
-        let ty = self.monomorphized_lvalue_ty(lvalue);
-        let layout = bcx.ccx.layout_of(ty);
-
-        // ZSTs don't require any actual memory access.
-        if layout.is_zst() {
-            return OperandRef::new_zst(bcx.ccx, layout);
-        }
-
-        if let Some(o) = self.maybe_trans_consume_direct(bcx, lvalue) {
-            return o;
-        }
-
         // for most lvalues, to consume them we just load them
         // out from their home
-        self.trans_lvalue(bcx, lvalue).load(bcx)
+        let tr_lvalue = self.trans_lvalue(bcx, lvalue);
+        let ty = tr_lvalue.ty.to_ty(bcx.tcx());
+        self.trans_load(bcx, tr_lvalue.llval, tr_lvalue.alignment, ty)
     }
 
     pub fn trans_operand(&mut self,
@@ -317,11 +299,60 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 let operand = val.to_operand(bcx.ccx);
                 if let OperandValue::Ref(ptr, align) = operand.val {
                    // If this is an OperandValue::Ref to an immediate constant, load it.
-                    LvalueRef::new_sized(ptr, operand.layout, align).load(bcx)
+                    self.trans_load(bcx, ptr, align, operand.ty)
                 } else {
                     operand
                 }
             }
         }
     }
+
+    pub fn store_operand(&mut self,
+                         bcx: &Builder<'a, 'tcx>,
+                         lldest: ValueRef,
+                         align: Option<u32>,
+                         operand: OperandRef<'tcx>) {
+        debug!("store_operand: operand={:?}, align={:?}", operand, align);
+        // Avoid generating stores of zero-sized values, because the only way to have a zero-sized
+        // value is through `undef`, and the store itself is useless.
+        if common::type_is_zero_size(bcx.ccx, operand.ty) {
+            return;
+        }
+        match operand.val {
+            OperandValue::Ref(r, Alignment::Packed) =>
+                base::memcpy_ty(bcx, lldest, r, operand.ty, Some(1)),
+            OperandValue::Ref(r, Alignment::AbiAligned) =>
+                base::memcpy_ty(bcx, lldest, r, operand.ty, align),
+            OperandValue::Immediate(s) => {
+                bcx.store(base::from_immediate(bcx, s), lldest, align);
+            }
+            OperandValue::Pair(a, b) => {
+                let (ix0, ix1, f_align) = match *bcx.ccx.layout_of(operand.ty) {
+                    Layout::Univariant { ref variant, .. } => {
+                        (adt::struct_llfields_index(variant, 0),
+                        adt::struct_llfields_index(variant, 1),
+                        if variant.packed { Some(1) } else { None })
+                    }
+                    _ => (0, 1, align)
+                };
+
+                let a = base::from_immediate(bcx, a);
+                let b = base::from_immediate(bcx, b);
+
+                // See comment above about zero-sized values.
+                let (a_zst, b_zst) = common::type_pair_fields(bcx.ccx, operand.ty)
+                    .map_or((false, false), |[a_ty, b_ty]| {
+                        (common::type_is_zero_size(bcx.ccx, a_ty),
+                         common::type_is_zero_size(bcx.ccx, b_ty))
+                    });
+
+                if !a_zst {
+                    bcx.store(a, bcx.struct_gep(lldest, ix0), f_align);
+                }
+                if !b_zst {
+                    bcx.store(b, bcx.struct_gep(lldest, ix1), f_align);
+                }
+            }
+        }
+    }
 }
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
index 4781425f491..7e187a85867 100644
--- a/src/librustc_trans/mir/rvalue.rs
+++ b/src/librustc_trans/mir/rvalue.rs
@@ -11,7 +11,8 @@
 use llvm::{self, ValueRef};
 use rustc::ty::{self, Ty};
 use rustc::ty::cast::{CastTy, IntTy};
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::layout::{Layout, LayoutTyper};
+use rustc::mir::tcx::LvalueTy;
 use rustc::mir;
 use rustc::middle::lang_items::ExchangeMallocFnLangItem;
 use rustc_apfloat::{ieee, Float, Status, Round};
@@ -21,12 +22,14 @@ use std::{u128, i128};
 use base;
 use builder::Builder;
 use callee;
-use common::{self, val_ty};
-use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_uint_big};
+use common::{self, val_ty, C_bool, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral};
 use consts;
+use adt;
+use machine;
 use monomorphize;
 use type_::Type;
-use type_of::LayoutLlvmExt;
+use type_of;
+use tvec;
 use value::Value;
 
 use super::{MirContext, LocalRef};
@@ -49,18 +52,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                let tr_operand = self.trans_operand(&bcx, operand);
                // FIXME: consider not copying constants through stack. (fixable by translating
                // constants into OperandValue::Ref, why don’t we do that yet if we don’t?)
-               tr_operand.val.store(&bcx, dest);
+               self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), tr_operand);
                bcx
            }
 
-            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
-                // The destination necessarily contains a fat pointer, so if
-                // it's a scalar pair, it's a fat pointer or newtype thereof.
-                if dest.layout.is_llvm_scalar_pair() {
+            mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
+                let cast_ty = self.monomorphize(&cast_ty);
+
+                if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
                     // into-coerce of a thin pointer to a fat pointer - just
                     // use the operand path.
                     let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                    temp.val.store(&bcx, dest);
+                    self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
                     return bcx;
                 }
 
@@ -69,9 +72,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // `CoerceUnsized` can be passed by a where-clause,
                 // so the (generic) MIR may not be able to expand it.
                 let operand = self.trans_operand(&bcx, source);
-                match operand.val {
-                    OperandValue::Pair(..) |
-                    OperandValue::Immediate(_) => {
+                let operand = operand.pack_if_pair(&bcx);
+                let llref = match operand.val {
+                    OperandValue::Pair(..) => bug!(),
+                    OperandValue::Immediate(llval) => {
                         // unsize from an immediate structure. We don't
                         // really need a temporary alloca here, but
                         // avoiding it would require us to have
@@ -79,93 +83,106 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         // index into the struct, and this case isn't
                         // important enough for it.
                         debug!("trans_rvalue: creating ugly alloca");
-                        let scratch = LvalueRef::alloca(&bcx, operand.layout, "__unsize_temp");
-                        scratch.storage_live(&bcx);
-                        operand.val.store(&bcx, scratch);
-                        base::coerce_unsized_into(&bcx, scratch, dest);
-                        scratch.storage_dead(&bcx);
+                        let scratch = LvalueRef::alloca(&bcx, operand.ty, "__unsize_temp");
+                        base::store_ty(&bcx, llval, scratch.llval, scratch.alignment, operand.ty);
+                        scratch
                     }
                     OperandValue::Ref(llref, align) => {
-                        let source = LvalueRef::new_sized(llref, operand.layout, align);
-                        base::coerce_unsized_into(&bcx, source, dest);
+                        LvalueRef::new_sized_ty(llref, operand.ty, align)
                     }
-                }
+                };
+                base::coerce_unsized_into(&bcx, &llref, &dest);
                 bcx
             }
 
             mir::Rvalue::Repeat(ref elem, count) => {
-                let tr_elem = self.trans_operand(&bcx, elem);
+                let dest_ty = dest.ty.to_ty(bcx.tcx());
 
-                // Do not generate the loop for zero-sized elements or empty arrays.
-                if dest.layout.is_zst() {
+                // No need to initialize the memory of a zero-sized slice
+                if common::type_is_zero_size(bcx.ccx, dest_ty) {
                     return bcx;
                 }
 
-                let start = dest.project_index(&bcx, C_usize(bcx.ccx, 0)).llval;
+                let tr_elem = self.trans_operand(&bcx, elem);
+                let size = count.as_u64();
+                let size = C_usize(bcx.ccx, size);
+                let base = base::get_dataptr(&bcx, dest.llval);
+                let align = dest.alignment.to_align();
 
                 if let OperandValue::Immediate(v) = tr_elem.val {
-                    let align = dest.alignment.non_abi()
-                        .unwrap_or(tr_elem.layout.align);
-                    let align = C_i32(bcx.ccx, align.abi() as i32);
-                    let size = C_usize(bcx.ccx, dest.layout.size.bytes());
-
                     // Use llvm.memset.p0i8.* to initialize all zero arrays
                     if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
-                        let fill = C_u8(bcx.ccx, 0);
-                        base::call_memset(&bcx, start, fill, size, align, false);
+                        let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
+                        let align = C_i32(bcx.ccx, align as i32);
+                        let ty = type_of::type_of(bcx.ccx, dest_ty);
+                        let size = machine::llsize_of(bcx.ccx, ty);
+                        let fill = C_uint(Type::i8(bcx.ccx), 0);
+                        base::call_memset(&bcx, base, fill, size, align, false);
                         return bcx;
                     }
 
                     // Use llvm.memset.p0i8.* to initialize byte arrays
-                    let v = base::from_immediate(&bcx, v);
                     if common::val_ty(v) == Type::i8(bcx.ccx) {
-                        base::call_memset(&bcx, start, v, size, align, false);
+                        let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty));
+                        let align = C_i32(bcx.ccx, align as i32);
+                        base::call_memset(&bcx, base, v, size, align, false);
                         return bcx;
                     }
                 }
 
-                let count = count.as_u64();
-                let count = C_usize(bcx.ccx, count);
-                let end = dest.project_index(&bcx, count).llval;
-
-                let header_bcx = bcx.build_sibling_block("repeat_loop_header");
-                let body_bcx = bcx.build_sibling_block("repeat_loop_body");
-                let next_bcx = bcx.build_sibling_block("repeat_loop_next");
-
-                bcx.br(header_bcx.llbb());
-                let current = header_bcx.phi(common::val_ty(start), &[start], &[bcx.llbb()]);
-
-                let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
-                header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
-
-                tr_elem.val.store(&body_bcx,
-                    LvalueRef::new_sized(current, tr_elem.layout, dest.alignment));
-
-                let next = body_bcx.inbounds_gep(current, &[C_usize(bcx.ccx, 1)]);
-                body_bcx.br(header_bcx.llbb());
-                header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
-
-                next_bcx
+                tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| {
+                    self.store_operand(bcx, llslot, align, tr_elem);
+                    bcx.br(loop_bb);
+                })
             }
 
             mir::Rvalue::Aggregate(ref kind, ref operands) => {
-                let (dest, active_field_index) = match **kind {
-                    mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
-                        dest.trans_set_discr(&bcx, variant_index);
-                        if adt_def.is_enum() {
-                            (dest.project_downcast(&bcx, variant_index), active_field_index)
-                        } else {
-                            (dest, active_field_index)
+                match **kind {
+                    mir::AggregateKind::Adt(adt_def, variant_index, substs, active_field_index) => {
+                        let discr = adt_def.discriminant_for_variant(bcx.tcx(), variant_index)
+                           .to_u128_unchecked() as u64;
+                        let dest_ty = dest.ty.to_ty(bcx.tcx());
+                        adt::trans_set_discr(&bcx, dest_ty, dest.llval, discr);
+                        for (i, operand) in operands.iter().enumerate() {
+                            let op = self.trans_operand(&bcx, operand);
+                            // Do not generate stores and GEPis for zero-sized fields.
+                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
+                                let mut val = LvalueRef::new_sized(
+                                    dest.llval, dest.ty, dest.alignment);
+                                let field_index = active_field_index.unwrap_or(i);
+                                val.ty = LvalueTy::Downcast {
+                                    adt_def,
+                                    substs: self.monomorphize(&substs),
+                                    variant_index,
+                                };
+                                let (lldest_i, align) = val.trans_field_ptr(&bcx, field_index);
+                                self.store_operand(&bcx, lldest_i, align.to_align(), op);
+                            }
+                        }
+                    },
+                    _ => {
+                        // If this is a tuple or closure, we need to translate GEP indices.
+                        let layout = bcx.ccx.layout_of(dest.ty.to_ty(bcx.tcx()));
+                        let get_memory_index = |i| {
+                            if let Layout::Univariant { ref variant, .. } = *layout {
+                                adt::struct_llfields_index(variant, i)
+                            } else {
+                                i
+                            }
+                        };
+                        let alignment = dest.alignment;
+                        for (i, operand) in operands.iter().enumerate() {
+                            let op = self.trans_operand(&bcx, operand);
+                            // Do not generate stores and GEPis for zero-sized fields.
+                            if !common::type_is_zero_size(bcx.ccx, op.ty) {
+                                // Note: perhaps this should be StructGep, but
+                                // note that in some cases the values here will
+                                // not be structs but arrays.
+                                let i = get_memory_index(i);
+                                let dest = bcx.gepi(dest.llval, &[0, i]);
+                                self.store_operand(&bcx, dest, alignment.to_align(), op);
+                            }
                         }
-                    }
-                    _ => (dest, None)
-                };
-                for (i, operand) in operands.iter().enumerate() {
-                    let op = self.trans_operand(&bcx, operand);
-                    // Do not generate stores and GEPis for zero-sized fields.
-                    if !op.layout.is_zst() {
-                        let field_index = active_field_index.unwrap_or(i);
-                        op.val.store(&bcx, dest.project_field(&bcx, field_index));
                     }
                 }
                 bcx
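
The `Rvalue::Repeat` arm above special-cases all-zero and byte-sized elements: instead of emitting a store loop, it fills the destination with a single `llvm.memset`. The same effect is visible from Rust through `std::ptr::write_bytes`, the memset analogue:

    // `[0u8; N]`-style initialization collapses to one memset.
    fn main() {
        let mut buf = [0xFFu8; 16];
        unsafe { std::ptr::write_bytes(buf.as_mut_ptr(), 0u8, buf.len()) };
        assert!(buf.iter().all(|&b| b == 0));
    }
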
@@ -174,7 +191,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             _ => {
                 assert!(self.rvalue_creates_operand(rvalue));
                 let (bcx, temp) = self.trans_rvalue_operand(bcx, rvalue);
-                temp.val.store(&bcx, dest);
+                self.store_operand(&bcx, dest.llval, dest.alignment.to_align(), temp);
                 bcx
             }
         }
@@ -188,32 +205,32 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         assert!(self.rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
 
         match *rvalue {
-            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
+            mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
                 let operand = self.trans_operand(&bcx, source);
                 debug!("cast operand is {:?}", operand);
-                let cast = bcx.ccx.layout_of(self.monomorphize(&mir_cast_ty));
+                let cast_ty = self.monomorphize(&cast_ty);
 
                 let val = match *kind {
                     mir::CastKind::ReifyFnPointer => {
-                        match operand.layout.ty.sty {
+                        match operand.ty.sty {
                             ty::TyFnDef(def_id, substs) => {
                                 OperandValue::Immediate(
                                     callee::resolve_and_get_fn(bcx.ccx, def_id, substs))
                             }
                             _ => {
-                                bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
+                                bug!("{} cannot be reified to a fn ptr", operand.ty)
                             }
                         }
                     }
                     mir::CastKind::ClosureFnPointer => {
-                        match operand.layout.ty.sty {
+                        match operand.ty.sty {
                             ty::TyClosure(def_id, substs) => {
                                 let instance = monomorphize::resolve_closure(
                                     bcx.ccx.tcx(), def_id, substs, ty::ClosureKind::FnOnce);
                                 OperandValue::Immediate(callee::get_fn(bcx.ccx, instance))
                             }
                             _ => {
-                                bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
+                                bug!("{} cannot be cast to a fn ptr", operand.ty)
                             }
                         }
                     }
@@ -222,24 +239,26 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         operand.val
                     }
                     mir::CastKind::Unsize => {
-                        assert!(cast.is_llvm_scalar_pair());
+                        // unsize targets other than a fat pointer currently
+                        // can't be operands.
+                        assert!(common::type_is_fat_ptr(bcx.ccx, cast_ty));
+
                         match operand.val {
                             OperandValue::Pair(lldata, llextra) => {
                                 // unsize from a fat pointer - this is a
                                 // "trait-object-to-supertrait" coercion, for
                                 // example,
                                 //   &'a fmt::Debug+Send => &'a fmt::Debug,
-
-                                // HACK(eddyb) have to bitcast pointers
-                                // until LLVM removes pointee types.
-                                let lldata = bcx.pointercast(lldata,
-                                    cast.scalar_pair_element_llvm_type(bcx.ccx, 0));
+                                // So we need to pointercast the base to ensure
+                                // the types match up.
+                                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast_ty);
+                                let lldata = bcx.pointercast(lldata, llcast_ty);
                                 OperandValue::Pair(lldata, llextra)
                             }
                             OperandValue::Immediate(lldata) => {
                                 // "standard" unsize
                                 let (lldata, llextra) = base::unsize_thin_ptr(&bcx, lldata,
-                                    operand.layout.ty, cast.ty);
+                                    operand.ty, cast_ty);
                                 OperandValue::Pair(lldata, llextra)
                             }
                             OperandValue::Ref(..) => {
@@ -248,17 +267,20 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                             }
                         }
                     }
-                    mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
-                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
-                            if cast.is_llvm_scalar_pair() {
-                                let data_cast = bcx.pointercast(data_ptr,
-                                    cast.scalar_pair_element_llvm_type(bcx.ccx, 0));
-                                OperandValue::Pair(data_cast, meta)
+                    mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.ty) => {
+                        let ll_cast_ty = type_of::immediate_type_of(bcx.ccx, cast_ty);
+                        let ll_from_ty = type_of::immediate_type_of(bcx.ccx, operand.ty);
+                        if let OperandValue::Pair(data_ptr, meta_ptr) = operand.val {
+                            if common::type_is_fat_ptr(bcx.ccx, cast_ty) {
+                                let ll_cft = ll_cast_ty.field_types();
+                                let ll_fft = ll_from_ty.field_types();
+                                let data_cast = bcx.pointercast(data_ptr, ll_cft[0]);
+                                assert_eq!(ll_cft[1].kind(), ll_fft[1].kind());
+                                OperandValue::Pair(data_cast, meta_ptr)
                             } else { // cast to thin-ptr
+                                // Casting a fat pointer to a thin pointer extracts the
+                                // data pointer and pointer-casts it to the desired type.
-                                let llcast_ty = cast.immediate_llvm_type(bcx.ccx);
-                                let llval = bcx.pointercast(data_ptr, llcast_ty);
+                                let llval = bcx.pointercast(data_ptr, ll_cast_ty);
                                 OperandValue::Immediate(llval)
                             }
                         } else {
@@ -266,32 +288,30 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         }
                     }
                     mir::CastKind::Misc => {
-                        assert!(cast.is_llvm_immediate());
-                        let r_t_in = CastTy::from_ty(operand.layout.ty)
-                            .expect("bad input type for cast");
-                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-                        let ll_t_in = operand.layout.immediate_llvm_type(bcx.ccx);
-                        let ll_t_out = cast.immediate_llvm_type(bcx.ccx);
+                        debug_assert!(common::type_is_immediate(bcx.ccx, cast_ty));
+                        let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
+                        let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
+                        let ll_t_in = type_of::immediate_type_of(bcx.ccx, operand.ty);
+                        let ll_t_out = type_of::immediate_type_of(bcx.ccx, cast_ty);
                         let llval = operand.immediate();
-
-                        let mut signed = false;
-                        if let layout::Abi::Scalar(ref scalar) = operand.layout.abi {
-                            if let layout::Int(_, s) = scalar.value {
-                                signed = s;
-
-                                if scalar.valid_range.end > scalar.valid_range.start {
-                                    // We want `table[e as usize]` to not
-                                    // have bound checks, and this is the most
-                                    // convenient place to put the `assume`.
-
-                                    base::call_assume(&bcx, bcx.icmp(
-                                        llvm::IntULE,
-                                        llval,
-                                        C_uint_big(ll_t_in, scalar.valid_range.end)
-                                    ));
-                                }
+                        let l = bcx.ccx.layout_of(operand.ty);
+                        let signed = if let Layout::CEnum { signed, min, max, .. } = *l {
+                            if max > min {
+                                // We want `table[e as usize]` not to need
+                                // bounds checks, and this is the most
+                                // convenient place to put the `assume`.
+
+                                base::call_assume(&bcx, bcx.icmp(
+                                    llvm::IntULE,
+                                    llval,
+                                    C_uint(common::val_ty(llval), max)
+                                ));
                             }
-                        }
+
+                            signed
+                        } else {
+                            operand.ty.is_signed()
+                        };
 
                         let newval = match (r_t_in, r_t_out) {
                             (CastTy::Int(_), CastTy::Int(_)) => {
@@ -323,43 +343,49 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                                 cast_float_to_int(&bcx, true, llval, ll_t_in, ll_t_out),
                             (CastTy::Float, CastTy::Int(_)) =>
                                 cast_float_to_int(&bcx, false, llval, ll_t_in, ll_t_out),
-                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty)
+                            _ => bug!("unsupported cast: {:?} to {:?}", operand.ty, cast_ty)
                         };
                         OperandValue::Immediate(newval)
                     }
                 };
-                (bcx, OperandRef {
+                let operand = OperandRef {
                     val,
-                    layout: cast
-                })
+                    ty: cast_ty
+                };
+                (bcx, operand)
             }
 
             mir::Rvalue::Ref(_, bk, ref lvalue) => {
                 let tr_lvalue = self.trans_lvalue(&bcx, lvalue);
 
-                let ty = tr_lvalue.layout.ty;
+                let ty = tr_lvalue.ty.to_ty(bcx.tcx());
+                let ref_ty = bcx.tcx().mk_ref(
+                    bcx.tcx().types.re_erased,
+                    ty::TypeAndMut { ty: ty, mutbl: bk.to_mutbl_lossy() }
+                );
 
                 // Note: lvalues are indirect, so storing the `llval` into the
                 // destination effectively creates a reference.
-                let val = if !bcx.ccx.shared().type_has_metadata(ty) {
-                    OperandValue::Immediate(tr_lvalue.llval)
+                let operand = if !bcx.ccx.shared().type_has_metadata(ty) {
+                    OperandRef {
+                        val: OperandValue::Immediate(tr_lvalue.llval),
+                        ty: ref_ty,
+                    }
                 } else {
-                    OperandValue::Pair(tr_lvalue.llval, tr_lvalue.llextra)
+                    OperandRef {
+                        val: OperandValue::Pair(tr_lvalue.llval,
+                                                tr_lvalue.llextra),
+                        ty: ref_ty,
+                    }
                 };
-                (bcx, OperandRef {
-                    val,
-                    layout: self.ccx.layout_of(self.ccx.tcx().mk_ref(
-                        self.ccx.tcx().types.re_erased,
-                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
-                    )),
-                })
+                (bcx, operand)
             }
 
             mir::Rvalue::Len(ref lvalue) => {
                 let size = self.evaluate_array_len(&bcx, lvalue);
                 let operand = OperandRef {
                     val: OperandValue::Immediate(size),
-                    layout: bcx.ccx.layout_of(bcx.tcx().types.usize),
+                    ty: bcx.tcx().types.usize,
                 };
                 (bcx, operand)
             }
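
The `Unsize` arm earlier in this function handles two shapes of coercion, keyed on the operand kind. At the source level (illustrative only):

    use std::fmt::Debug;

    fn main() {
        // Immediate -> Pair: a thin pointer gains its length metadata
        // ("standard" unsize).
        let thin: &[u8; 4] = &[1, 2, 3, 4];
        let fat: &[u8] = thin;
        // Pair -> Pair: trait-object-to-supertrait coercion; only the
        // data pointer is pointercast, the vtable half is carried over.
        let both: &(dyn Debug + Send) = &0u32;
        let _just_debug: &dyn Debug = both;
        assert_eq!(fat.len(), 4);
    }
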
@@ -367,26 +393,26 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => {
                 let lhs = self.trans_operand(&bcx, lhs);
                 let rhs = self.trans_operand(&bcx, rhs);
-                let llresult = match (lhs.val, rhs.val) {
-                    (OperandValue::Pair(lhs_addr, lhs_extra),
-                     OperandValue::Pair(rhs_addr, rhs_extra)) => {
-                        self.trans_fat_ptr_binop(&bcx, op,
-                                                 lhs_addr, lhs_extra,
-                                                 rhs_addr, rhs_extra,
-                                                 lhs.layout.ty)
-                    }
-
-                    (OperandValue::Immediate(lhs_val),
-                     OperandValue::Immediate(rhs_val)) => {
-                        self.trans_scalar_binop(&bcx, op, lhs_val, rhs_val, lhs.layout.ty)
+                let llresult = if common::type_is_fat_ptr(bcx.ccx, lhs.ty) {
+                    match (lhs.val, rhs.val) {
+                        (OperandValue::Pair(lhs_addr, lhs_extra),
+                         OperandValue::Pair(rhs_addr, rhs_extra)) => {
+                            self.trans_fat_ptr_binop(&bcx, op,
+                                                     lhs_addr, lhs_extra,
+                                                     rhs_addr, rhs_extra,
+                                                     lhs.ty)
+                        }
+                        _ => bug!()
                     }
 
-                    _ => bug!()
+                } else {
+                    self.trans_scalar_binop(&bcx, op,
+                                            lhs.immediate(), rhs.immediate(),
+                                            lhs.ty)
                 };
                 let operand = OperandRef {
                     val: OperandValue::Immediate(llresult),
-                    layout: bcx.ccx.layout_of(
-                        op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty)),
+                    ty: op.ty(bcx.tcx(), lhs.ty, rhs.ty),
                 };
                 (bcx, operand)
             }
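
The `assume` emitted in the `CastKind::Misc` arm above (the `Layout::CEnum` case) tells LLVM that the loaded discriminant lies in the enum's valid range, which is what lets indexing like the following compile without a bounds check (illustrative only):

    enum Color { R, G, B }

    fn lookup(table: &[u32; 3], c: Color) -> u32 {
        // After the cast, `assume(c as usize <= 2)` lets LLVM prove the
        // index is in bounds and drop the panic branch.
        table[c as usize]
    }

    fn main() {
        assert_eq!(lookup(&[10, 20, 30], Color::G), 20);
    }
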
@@ -395,12 +421,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 let rhs = self.trans_operand(&bcx, rhs);
                 let result = self.trans_scalar_checked_binop(&bcx, op,
                                                              lhs.immediate(), rhs.immediate(),
-                                                             lhs.layout.ty);
-                let val_ty = op.ty(bcx.tcx(), lhs.layout.ty, rhs.layout.ty);
+                                                             lhs.ty);
+                let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
                 let operand_ty = bcx.tcx().intern_tup(&[val_ty, bcx.tcx().types.bool], false);
                 let operand = OperandRef {
                     val: result,
-                    layout: bcx.ccx.layout_of(operand_ty)
+                    ty: operand_ty
                 };
 
                 (bcx, operand)
@@ -409,7 +435,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             mir::Rvalue::UnaryOp(op, ref operand) => {
                 let operand = self.trans_operand(&bcx, operand);
                 let lloperand = operand.immediate();
-                let is_float = operand.layout.ty.is_fp();
+                let is_float = operand.ty.is_fp();
                 let llval = match op {
                     mir::UnOp::Not => bcx.not(lloperand),
                     mir::UnOp::Neg => if is_float {
@@ -420,43 +446,47 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 };
                 (bcx, OperandRef {
                     val: OperandValue::Immediate(llval),
-                    layout: operand.layout,
+                    ty: operand.ty,
                 })
             }
 
             mir::Rvalue::Discriminant(ref lvalue) => {
+                let discr_lvalue = self.trans_lvalue(&bcx, lvalue);
+                let enum_ty = discr_lvalue.ty.to_ty(bcx.tcx());
                 let discr_ty = rvalue.ty(&*self.mir, bcx.tcx());
-                let discr =  self.trans_lvalue(&bcx, lvalue)
-                    .trans_get_discr(&bcx, discr_ty);
+                let discr_type = type_of::immediate_type_of(bcx.ccx, discr_ty);
+                let discr = adt::trans_get_discr(&bcx, enum_ty, discr_lvalue.llval,
+                                                  discr_lvalue.alignment, Some(discr_type), true);
                 (bcx, OperandRef {
                     val: OperandValue::Immediate(discr),
-                    layout: self.ccx.layout_of(discr_ty)
+                    ty: discr_ty
                 })
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                 assert!(bcx.ccx.shared().type_is_sized(ty));
-                let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty).bytes());
+                let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty));
                 let tcx = bcx.tcx();
                 (bcx, OperandRef {
                     val: OperandValue::Immediate(val),
-                    layout: self.ccx.layout_of(tcx.types.usize),
+                    ty: tcx.types.usize,
                 })
             }
 
             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
-                let (size, align) = bcx.ccx.size_and_align_of(content_ty);
-                let llsize = C_usize(bcx.ccx, size.bytes());
-                let llalign = C_usize(bcx.ccx, align.abi());
-                let box_layout = bcx.ccx.layout_of(bcx.tcx().mk_box(content_ty));
-                let llty_ptr = box_layout.llvm_type(bcx.ccx);
+                let llty = type_of::type_of(bcx.ccx, content_ty);
+                let llsize = machine::llsize_of(bcx.ccx, llty);
+                let align = bcx.ccx.align_of(content_ty);
+                let llalign = C_usize(bcx.ccx, align as u64);
+                let llty_ptr = llty.ptr_to();
+                let box_ty = bcx.tcx().mk_box(content_ty);
 
                 // Allocate space:
                 let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
                     Ok(id) => id,
                     Err(s) => {
-                        bcx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
+                        bcx.sess().fatal(&format!("allocation of `{}` {}", box_ty, s));
                     }
                 };
                 let instance = ty::Instance::mono(bcx.tcx(), def_id);
@@ -465,7 +495,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
 
                 let operand = OperandRef {
                     val: OperandValue::Immediate(val),
-                    layout: box_layout,
+                    ty: box_ty,
                 };
                 (bcx, operand)
             }
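
The `NullOp::Box` arm above sizes the allocation from the content type and calls the `exchange_malloc` lang item. At the source level this backs the nightly-only `box` expression; a sketch (assumes a nightly toolchain of this era):

    #![feature(box_syntax)]

    fn main() {
        // Lowers to exchange_malloc(size_of::<i32>(), align_of::<i32>()),
        // followed by a store of 5 through the returned pointer, with the
        // result typed as Box<i32>.
        let b = box 5i32;
        assert_eq!(*b, 5);
    }
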
@@ -478,8 +508,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 // According to `rvalue_creates_operand`, only ZST
                 // aggregate rvalues are allowed to be operands.
                 let ty = rvalue.ty(self.mir, self.ccx.tcx());
-                (bcx, OperandRef::new_zst(self.ccx,
-                    self.ccx.layout_of(self.monomorphize(&ty))))
+                (bcx, OperandRef::new_zst(self.ccx, self.monomorphize(&ty)))
             }
         }
     }
@@ -492,9 +521,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         // because trans_lvalue() panics if Local is operand.
         if let mir::Lvalue::Local(index) = *lvalue {
             if let LocalRef::Operand(Some(op)) = self.locals[index] {
-                if let ty::TyArray(_, n) = op.layout.ty.sty {
-                    let n = n.val.to_const_int().unwrap().to_u64().unwrap();
-                    return common::C_usize(bcx.ccx, n);
+                if common::type_is_zero_size(bcx.ccx, op.ty) {
+                    if let ty::TyArray(_, n) = op.ty.sty {
+                        let n = n.val.to_const_int().unwrap().to_u64().unwrap();
+                        return common::C_usize(bcx.ccx, n);
+                    }
                 }
             }
         }
@@ -699,7 +730,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             mir::Rvalue::Aggregate(..) => {
                 let ty = rvalue.ty(self.mir, self.ccx.tcx());
                 let ty = self.monomorphize(&ty);
-                self.ccx.layout_of(ty).is_zst()
+                common::type_is_zero_size(self.ccx, ty)
             }
         }
 
@@ -799,7 +830,7 @@ fn cast_int_to_float(bcx: &Builder,
     if is_u128_to_f32 {
         // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
         // and for everything else LLVM's uitofp works just fine.
-        let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
+        let max = C_big_integral(int_ty, MAX_F32_PLUS_HALF_ULP);
         let overflow = bcx.icmp(llvm::IntUGE, x, max);
         let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32);
         let infinity = consts::bitcast(infinity_bits, float_ty);
@@ -926,8 +957,8 @@ fn cast_float_to_int(bcx: &Builder,
     // performed is ultimately up to the backend, but at least x86 does perform them.
     let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min);
     let greater = bcx.fcmp(llvm::RealOGT, x, f_max);
-    let int_max = C_uint_big(int_ty, int_max(signed, int_ty));
-    let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128);
+    let int_max = C_big_integral(int_ty, int_max(signed, int_ty));
+    let int_min = C_big_integral(int_ty, int_min(signed, int_ty) as u128);
     let s0 = bcx.select(less_or_nan, int_min, fptosui_result);
     let s1 = bcx.select(greater, int_max, s0);
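
The two `select`s above clamp the raw `fptosi`/`fptoui` result to the target integer's range. Roughly, in Rust, assuming the `f32 -> i32` case and ignoring the NaN handling that follows this hunk:

    fn saturate(x: f32, f_min: f32, f_max: f32, fptosi_raw: i32) -> i32 {
        // f_min/f_max are the smallest/largest floats whose truncation is
        // representable in i32.
        let less_or_nan = !(x >= f_min); // RealULT: also true for NaN
        let greater = x > f_max;         // RealOGT
        let s0 = if less_or_nan { i32::MIN } else { fptosi_raw };
        if greater { i32::MAX } else { s0 }
    }
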
 
diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs
index 607ecd887fa..bbf661ae9a7 100644
--- a/src/librustc_trans/mir/statement.rs
+++ b/src/librustc_trans/mir/statement.rs
@@ -10,11 +10,14 @@
 
 use rustc::mir;
 
+use base;
 use asm;
+use common;
 use builder::Builder;
 
 use super::MirContext;
 use super::LocalRef;
+use super::super::adt;
 
 impl<'a, 'tcx> MirContext<'a, 'tcx> {
     pub fn trans_statement(&mut self,
@@ -36,16 +39,18 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                             self.locals[index] = LocalRef::Operand(Some(operand));
                             bcx
                         }
-                        LocalRef::Operand(Some(op)) => {
-                            if !op.layout.is_zst() {
+                        LocalRef::Operand(Some(_)) => {
+                            let ty = self.monomorphized_lvalue_ty(lvalue);
+
+                            if !common::type_is_zero_size(bcx.ccx, ty) {
                                 span_bug!(statement.source_info.span,
                                           "operand {:?} already assigned",
                                           rvalue);
+                            } else {
+                                // The type is zero-sized, so the local is already set;
+                                // we still translate the rvalue so its operands are evaluated.
+                                self.trans_rvalue_operand(bcx, rvalue).0
                             }
-
-                            // If the type is zero-sized, it's already been set here,
-                            // but we still need to make sure we translate the operand
-                            self.trans_rvalue_operand(bcx, rvalue).0
                         }
                     }
                 } else {
@@ -54,25 +59,24 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
             }
             mir::StatementKind::SetDiscriminant{ref lvalue, variant_index} => {
-                self.trans_lvalue(&bcx, lvalue)
-                    .trans_set_discr(&bcx, variant_index);
+                let ty = self.monomorphized_lvalue_ty(lvalue);
+                let lvalue_transed = self.trans_lvalue(&bcx, lvalue);
+                adt::trans_set_discr(&bcx,
+                    ty,
+                    lvalue_transed.llval,
+                    variant_index as u64);
                 bcx
             }
             mir::StatementKind::StorageLive(local) => {
-                if let LocalRef::Lvalue(tr_lval) = self.locals[local] {
-                    tr_lval.storage_live(&bcx);
-                }
-                bcx
+                self.trans_storage_liveness(bcx, local, base::Lifetime::Start)
             }
             mir::StatementKind::StorageDead(local) => {
-                if let LocalRef::Lvalue(tr_lval) = self.locals[local] {
-                    tr_lval.storage_dead(&bcx);
-                }
-                bcx
+                self.trans_storage_liveness(bcx, local, base::Lifetime::End)
             }
             mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => {
                 let outputs = outputs.iter().map(|output| {
-                    self.trans_lvalue(&bcx, output)
+                    let lvalue = self.trans_lvalue(&bcx, output);
+                    (lvalue.llval, lvalue.ty.to_ty(bcx.tcx()))
                 }).collect();
 
                 let input_vals = inputs.iter().map(|input| {
@@ -87,4 +91,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             mir::StatementKind::Nop => bcx,
         }
     }
+
+    fn trans_storage_liveness(&self,
+                              bcx: Builder<'a, 'tcx>,
+                              index: mir::Local,
+                              intrinsic: base::Lifetime)
+                              -> Builder<'a, 'tcx> {
+        if let LocalRef::Lvalue(tr_lval) = self.locals[index] {
+            intrinsic.call(&bcx, tr_lval.llval);
+        }
+        bcx
+    }
 }
diff --git a/src/librustc_trans/trans_item.rs b/src/librustc_trans/trans_item.rs
index 991f99e0f6c..fb68be293a7 100644
--- a/src/librustc_trans/trans_item.rs
+++ b/src/librustc_trans/trans_item.rs
@@ -23,15 +23,14 @@ use common;
 use declare;
 use llvm;
 use monomorphize::Instance;
-use type_of::LayoutLlvmExt;
 use rustc::hir;
 use rustc::middle::trans::{Linkage, Visibility};
 use rustc::ty::{self, TyCtxt, TypeFoldable};
-use rustc::ty::layout::LayoutOf;
 use syntax::ast;
 use syntax::attr;
 use syntax_pos::Span;
 use syntax_pos::symbol::Symbol;
+use type_of;
 use std::fmt;
 
 pub use rustc::middle::trans::TransItem;
@@ -174,7 +173,7 @@ fn predefine_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     let def_id = ccx.tcx().hir.local_def_id(node_id);
     let instance = Instance::mono(ccx.tcx(), def_id);
     let ty = common::instance_ty(ccx.tcx(), &instance);
-    let llty = ccx.layout_of(ty).llvm_type(ccx);
+    let llty = type_of::type_of(ccx, ty);
 
     let g = declare::define_global(ccx, symbol_name, llty).unwrap_or_else(|| {
         ccx.sess().span_fatal(ccx.tcx().hir.span(node_id),
diff --git a/src/librustc_trans/tvec.rs b/src/librustc_trans/tvec.rs
new file mode 100644
index 00000000000..da4a4e55a67
--- /dev/null
+++ b/src/librustc_trans/tvec.rs
@@ -0,0 +1,53 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use llvm;
+use builder::Builder;
+use llvm::{BasicBlockRef, ValueRef};
+use common::*;
+use rustc::ty::Ty;
+
+pub fn slice_for_each<'a, 'tcx, F>(
+    bcx: &Builder<'a, 'tcx>,
+    data_ptr: ValueRef,
+    unit_ty: Ty<'tcx>,
+    len: ValueRef,
+    f: F
+) -> Builder<'a, 'tcx> where F: FnOnce(&Builder<'a, 'tcx>, ValueRef, BasicBlockRef) {
+    // Special-case slices with elements of size 0 so they don't go out of bounds (#9890).
+    let zst = type_is_zero_size(bcx.ccx, unit_ty);
+    let add = |bcx: &Builder, a, b| if zst {
+        bcx.add(a, b)
+    } else {
+        bcx.inbounds_gep(a, &[b])
+    };
+
+    let body_bcx = bcx.build_sibling_block("slice_loop_body");
+    let header_bcx = bcx.build_sibling_block("slice_loop_header");
+    let next_bcx = bcx.build_sibling_block("slice_loop_next");
+
+    let start = if zst {
+        // For ZSTs the "pointer" is a plain counter; start it at 0 so
+        // `start + len` cannot overflow even when `len == usize::MAX`.
+        C_usize(bcx.ccx, 0)
+    } else {
+        data_ptr
+    };
+    let end = add(&bcx, start, len);
+
+    bcx.br(header_bcx.llbb());
+    let current = header_bcx.phi(val_ty(start), &[start], &[bcx.llbb()]);
+
+    let keep_going = header_bcx.icmp(llvm::IntNE, current, end);
+    header_bcx.cond_br(keep_going, body_bcx.llbb(), next_bcx.llbb());
+
+    let next = add(&body_bcx, current, C_usize(bcx.ccx, 1));
+    f(&body_bcx, if zst { data_ptr } else { current }, header_bcx.llbb());
+    header_bcx.add_incoming_to_phi(current, next, body_bcx.llbb());
+    next_bcx
+}
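
The callback here is invoked once, to fill in the loop body; the block it builds then runs once per element at runtime. A schematic caller (`emit_elem_drop` is a made-up stand-in; the real users are the slice drop-glue paths):

    // Build a loop that visits every element of a [T] slice.
    let bcx = slice_for_each(&bcx, data_ptr, unit_ty, len, |body_bcx, elem_ptr, _header_bb| {
        // For ZST elements `elem_ptr` is just `data_ptr`; otherwise it is
        // the current induction-variable GEP into the slice.
        emit_elem_drop(body_bcx, elem_ptr, unit_ty);
    });
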
diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs
index 02224858b46..ffb303688aa 100644
--- a/src/librustc_trans/type_.rs
+++ b/src/librustc_trans/type_.rs
@@ -17,7 +17,7 @@ use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128};
 use context::CrateContext;
 
 use syntax::ast;
-use rustc::ty::layout::{self, Align};
+use rustc::ty::layout;
 
 use std::ffi::CString;
 use std::fmt;
@@ -66,6 +66,10 @@ impl Type {
         ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
     }
 
+    pub fn nil(ccx: &CrateContext) -> Type {
+        Type::empty_struct(ccx)
+    }
+
     pub fn metadata(ccx: &CrateContext) -> Type {
         ty!(llvm::LLVMRustMetadataTypeInContext(ccx.llcx()))
     }
@@ -198,6 +202,9 @@ impl Type {
         ty!(llvm::LLVMStructCreateNamed(ccx.llcx(), name.as_ptr()))
     }
 
+    pub fn empty_struct(ccx: &CrateContext) -> Type {
+        Type::struct_(ccx, &[], false)
+    }
 
     pub fn array(ty: &Type, len: u64) -> Type {
         ty!(llvm::LLVMRustArrayType(ty.to_ref(), len))
@@ -207,6 +214,20 @@ impl Type {
         ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint))
     }
 
+    pub fn vec(ccx: &CrateContext, ty: &Type) -> Type {
+        Type::struct_(ccx,
+            &[Type::array(ty, 0), Type::isize(ccx)],
+            false)
+    }
+
+    pub fn opaque_vec(ccx: &CrateContext) -> Type {
+        Type::vec(ccx, &Type::i8(ccx))
+    }
+
+    pub fn vtable_ptr(ccx: &CrateContext) -> Type {
+        Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to()
+    }
+
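
For reference, the LLVM shapes these three helpers produce (assuming a 64-bit target, so `isize` lowers to `i64`):

    // Type::vec(ccx, &T)    => { [0 x T], i64 }
    // Type::opaque_vec(ccx) => { [0 x i8], i64 }
    // Type::vtable_ptr(ccx) => void (i8*)**  (pointer into a table of fn pointers)
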
     pub fn kind(&self) -> TypeKind {
         unsafe {
             llvm::LLVMRustGetTypeKind(self.to_ref())
@@ -238,6 +259,19 @@ impl Type {
         }
     }
 
+    pub fn field_types(&self) -> Vec<Type> {
+        unsafe {
+            let n_elts = llvm::LLVMCountStructElementTypes(self.to_ref()) as usize;
+            if n_elts == 0 {
+                return Vec::new();
+            }
+            let mut elts = vec![Type { rf: ptr::null_mut() }; n_elts];
+            llvm::LLVMGetStructElementTypes(self.to_ref(),
+                                            elts.as_mut_ptr() as *mut TypeRef);
+            elts
+        }
+    }
+
     pub fn func_params(&self) -> Vec<Type> {
         unsafe {
             let n_args = llvm::LLVMCountParamTypes(self.to_ref()) as usize;
@@ -268,6 +302,7 @@ impl Type {
     pub fn from_integer(cx: &CrateContext, i: layout::Integer) -> Type {
         use rustc::ty::layout::Integer::*;
         match i {
+            I1 => Type::i1(cx),
             I8 => Type::i8(cx),
             I16 => Type::i16(cx),
             I32 => Type::i32(cx),
@@ -275,15 +310,4 @@ impl Type {
             I128 => Type::i128(cx),
         }
     }
-
-    /// Return a LLVM type that has at most the required alignment,
-    /// as a conservative approximation for unknown pointee types.
-    pub fn pointee_for_abi_align(ccx: &CrateContext, align: Align) -> Type {
-        if let Some(ity) = layout::Integer::for_abi_align(ccx, align) {
-            Type::from_integer(ccx, ity)
-        } else {
-            // FIXME(eddyb) We could find a better approximation here.
-            Type::i8(ccx)
-        }
-    }
 }
diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs
index 9b32c825117..cac09a81361 100644
--- a/src/librustc_trans/type_of.rs
+++ b/src/librustc_trans/type_of.rs
@@ -9,484 +9,231 @@
 // except according to those terms.
 
 use abi::FnType;
+use adt;
 use common::*;
-use rustc::hir;
+use machine;
 use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
-use rustc_back::PanicStrategy;
+use rustc::ty::layout::LayoutTyper;
 use trans_item::DefPathBasedNames;
 use type_::Type;
 
-use std::fmt::Write;
+use syntax::ast;
 
-fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                layout: TyLayout<'tcx>,
-                                defer: &mut Option<(Type, TyLayout<'tcx>)>)
-                                -> Type {
-    match layout.abi {
-        layout::Abi::Scalar(_) => bug!("handled elsewhere"),
-        layout::Abi::Vector => {
-            return Type::vector(&layout.field(ccx, 0).llvm_type(ccx),
-                                layout.fields.count() as u64);
+pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
+    match ty.sty {
+        ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) |
+        ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if ccx.shared().type_has_metadata(t) => {
+            in_memory_type_of(ccx, t).ptr_to()
         }
-        layout::Abi::ScalarPair(..) => {
-            return Type::struct_(ccx, &[
-                layout.scalar_pair_element_llvm_type(ccx, 0),
-                layout.scalar_pair_element_llvm_type(ccx, 1),
-            ], false);
-        }
-        layout::Abi::Uninhabited |
-        layout::Abi::Aggregate { .. } => {}
-    }
-
-    let name = match layout.ty.sty {
-        ty::TyClosure(..) |
-        ty::TyGenerator(..) |
-        ty::TyAdt(..) |
-        ty::TyDynamic(..) |
-        ty::TyForeign(..) |
-        ty::TyStr => {
-            let mut name = String::with_capacity(32);
-            let printer = DefPathBasedNames::new(ccx.tcx(), true, true);
-            printer.push_type_name(layout.ty, &mut name);
-            match (&layout.ty.sty, &layout.variants) {
-                (&ty::TyAdt(def, _), &layout::Variants::Single { index }) => {
-                    if def.is_enum() && !def.variants.is_empty() {
-                        write!(&mut name, "::{}", def.variants[index].name).unwrap();
-                    }
-                }
-                _ => {}
-            }
-            Some(name)
-        }
-        _ => None
-    };
-
-    match layout.fields {
-        layout::FieldPlacement::Union(_) => {
-            let size = layout.size.bytes();
-            let fill = Type::array(&Type::i8(ccx), size);
-            match name {
-                None => {
-                    Type::struct_(ccx, &[fill], layout.is_packed())
-                }
-                Some(ref name) => {
-                    let mut llty = Type::named_struct(ccx, name);
-                    llty.set_struct_body(&[fill], layout.is_packed());
-                    llty
-                }
-            }
-        }
-        layout::FieldPlacement::Array { count, .. } => {
-            Type::array(&layout.field(ccx, 0).llvm_type(ccx), count)
-        }
-        layout::FieldPlacement::Arbitrary { .. } => {
-            match name {
-                None => {
-                    Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed())
-                }
-                Some(ref name) => {
-                    let llty = Type::named_struct(ccx, name);
-                    *defer = Some((llty, layout));
-                    llty
-                }
-            }
+        ty::TyAdt(def, _) if def.is_box() => {
+            in_memory_type_of(ccx, ty.boxed_ty()).ptr_to()
         }
+        _ => bug!("expected fat ptr ty but got {:?}", ty)
     }
 }
 
-fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                             layout: TyLayout<'tcx>) -> Vec<Type> {
-    debug!("struct_llfields: {:#?}", layout);
-    let field_count = layout.fields.count();
-
-    let mut offset = Size::from_bytes(0);
-    let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
-    for i in layout.fields.index_by_increasing_offset() {
-        let field = layout.field(ccx, i);
-        let target_offset = layout.fields.offset(i as usize);
-        debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}",
-            i, field, offset, target_offset);
-        assert!(target_offset >= offset);
-        let padding = target_offset - offset;
-        result.push(Type::array(&Type::i8(ccx), padding.bytes()));
-        debug!("    padding before: {:?}", padding);
-
-        result.push(field.llvm_type(ccx));
-
-        if layout.is_packed() {
-            assert_eq!(padding.bytes(), 0);
-        } else {
-            assert!(field.align.abi() <= layout.align.abi(),
-                    "non-packed type has field with larger align ({}): {:#?}",
-                    field.align.abi(), layout);
+pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
+    let unsized_part = ccx.tcx().struct_tail(ty);
+    match unsized_part.sty {
+        ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
+            Type::uint_from_ty(ccx, ast::UintTy::Us)
         }
-
-        offset = target_offset + field.size;
+        ty::TyDynamic(..) => Type::vtable_ptr(ccx),
+        _ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}",
+                  unsized_part, ty)
     }
-    if !layout.is_unsized() && field_count > 0 {
-        if offset > layout.size {
-            bug!("layout: {:#?} stride: {:?} offset: {:?}",
-                 layout, layout.size, offset);
-        }
-        let padding = layout.size - offset;
-        debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
-               padding, offset, layout.size);
-        result.push(Type::array(&Type::i8(ccx), padding.bytes()));
-        assert!(result.len() == 1 + field_count * 2);
-    } else {
-        debug!("struct_llfields: offset: {:?} stride: {:?}",
-               offset, layout.size);
-    }
-
-    result
 }
 
-impl<'a, 'tcx> CrateContext<'a, 'tcx> {
-    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
-        self.layout_of(ty).align
-    }
-
-    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
-        self.layout_of(ty).size
-    }
-
-    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
-        self.layout_of(ty).size_and_align()
+pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
+    if t.is_bool() {
+        Type::i1(cx)
+    } else {
+        type_of(cx, t)
     }
 }
 
-#[derive(Copy, Clone, PartialEq, Eq)]
-pub enum PointerKind {
-    /// Most general case, we know no restrictions to tell LLVM.
-    Shared,
-
-    /// `&T` where `T` contains no `UnsafeCell`, is `noalias` and `readonly`.
-    Frozen,
-
-    /// `&mut T`, when we know `noalias` is safe for LLVM.
-    UniqueBorrowed,
-
-    /// `Box<T>`, unlike `UniqueBorrowed`, it also has `noalias` on returns.
-    UniqueOwned
-}
-
-#[derive(Copy, Clone)]
-pub struct PointeeInfo {
-    pub size: Size,
-    pub align: Align,
-    pub safe: Option<PointerKind>,
-}
-
-pub trait LayoutLlvmExt<'tcx> {
-    fn is_llvm_immediate(&self) -> bool;
-    fn is_llvm_scalar_pair<'a>(&self) -> bool;
-    fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
-    fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
-    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
-                                         index: usize) -> Type;
-    fn llvm_field_index(&self, index: usize) -> u64;
-    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
-                           -> Option<PointeeInfo>;
+/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
+/// This is the right LLVM type for an alloca containing a value of that type,
+/// and the pointee of an Lvalue Datum (which is always a LLVM pointer).
+/// For unsized types, the returned type is a fat pointer, thus the resulting
+/// LLVM type for a `Trait` Lvalue is `{ i8*, void(i8*)** }*`, which is a double
+/// indirection to the actual data, unlike a `i8` Lvalue, which is just `i8*`.
+/// This is needed due to the treatment of immediate values, as a fat pointer
+/// is too large to be placed in an SSA value (by our rules).
+/// For the raw type without the fat pointer indirection, see `in_memory_type_of`.
+pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
+    let ty = if cx.shared().type_has_metadata(ty) {
+        cx.tcx().mk_imm_ptr(ty)
+    } else {
+        ty
+    };
+    in_memory_type_of(cx, ty)
 }
 
-impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
-    fn is_llvm_immediate(&self) -> bool {
-        match self.abi {
-            layout::Abi::Uninhabited |
-            layout::Abi::Scalar(_) |
-            layout::Abi::Vector => true,
-            layout::Abi::ScalarPair(..) => false,
-            layout::Abi::Aggregate { .. } => self.is_zst()
-        }
-    }
-
-    fn is_llvm_scalar_pair<'a>(&self) -> bool {
-        match self.abi {
-            layout::Abi::ScalarPair(..) => true,
-            layout::Abi::Uninhabited |
-            layout::Abi::Scalar(_) |
-            layout::Abi::Vector |
-            layout::Abi::Aggregate { .. } => false
-        }
+/// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
+/// This is the right LLVM type for a field/array element of that type,
+/// and is the same as `type_of` for all Sized types.
+/// Unsized types, however, are represented by a "minimal unit", e.g.
+/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
+/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
+/// If the type is an unsized struct, the regular layout is generated,
+/// with the inner-most trailing unsized field using the "minimal unit"
+/// of that field's type - this is useful for taking the address of
+/// that field and ensuring the struct has the right alignment.
+/// For the LLVM type of a value as a whole, see `type_of`.
+pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
+    // Check the cache.
+    if let Some(&llty) = cx.lltypes().borrow().get(&t) {
+        return llty;
     }
 
-    /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`.
-    /// The pointee type of the pointer in `LvalueRef` is always this type.
-    /// For sized types, it is also the right LLVM type for an `alloca`
-    /// containing a value of that type, and most immediates (except `bool`).
-    /// Unsized types, however, are represented by a "minimal unit", e.g.
-    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
-    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
-    /// If the type is an unsized struct, the regular layout is generated,
-    /// with the inner-most trailing unsized field using the "minimal unit"
-    /// of that field's type - this is useful for taking the address of
-    /// that field and ensuring the struct has the right alignment.
-    fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
-        if let layout::Abi::Scalar(ref scalar) = self.abi {
-            // Use a different cache for scalars because pointers to DSTs
-            // can be either fat or thin (data pointers of fat pointers).
-            if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) {
-                return llty;
-            }
-            let llty = match scalar.value {
-                layout::Int(i, _) => Type::from_integer(ccx, i),
-                layout::F32 => Type::f32(ccx),
-                layout::F64 => Type::f64(ccx),
-                layout::Pointer => {
-                    let pointee = match self.ty.sty {
-                        ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
-                        ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
-                            ccx.layout_of(ty).llvm_type(ccx)
-                        }
-                        ty::TyAdt(def, _) if def.is_box() => {
-                            ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx)
-                        }
-                        ty::TyFnPtr(sig) => {
-                            let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
-                            FnType::new(ccx, sig, &[]).llvm_type(ccx)
-                        }
-                        _ => {
-                            // If we know the alignment, pick something better than i8.
-                            if let Some(pointee) = self.pointee_info_at(ccx, Size::from_bytes(0)) {
-                                Type::pointee_for_abi_align(ccx, pointee.align)
-                            } else {
-                                Type::i8(ccx)
-                            }
-                        }
-                    };
-                    pointee.ptr_to()
-                }
-            };
-            ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty);
-            return llty;
-        }
+    debug!("type_of {:?}", t);
 
+    assert!(!t.has_escaping_regions(), "{:?} has escaping regions", t);
 
-        // Check the cache.
-        let variant_index = match self.variants {
-            layout::Variants::Single { index } => Some(index),
-            _ => None
-        };
-        if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, variant_index)) {
-            return llty;
-        }
-
-        debug!("llvm_type({:#?})", self);
+    // Erase regions before lowering, so that types that differ only in
+    // their lifetimes map to a single LLVM type. Without this, e.g.
+    // `&'a T` and `&'b T` would each get their own cache entry and
+    // their own (identical) LLVM type.
+    let t_norm = cx.tcx().erase_regions(&t);
 
-        assert!(!self.ty.has_escaping_regions(), "{:?} has escaping regions", self.ty);
-
-        // Make sure lifetimes are erased, to avoid generating distinct LLVM
-        // types for Rust types that only differ in the choice of lifetimes.
-        let normal_ty = ccx.tcx().erase_regions(&self.ty);
-
-        let mut defer = None;
-        let llty = if self.ty != normal_ty {
-            let mut layout = ccx.layout_of(normal_ty);
-            if let Some(v) = variant_index {
-                layout = layout.for_variant(ccx, v);
-            }
-            layout.llvm_type(ccx)
-        } else {
-            uncached_llvm_type(ccx, *self, &mut defer)
-        };
-        debug!("--> mapped {:#?} to llty={:?}", self, llty);
-
-        ccx.lltypes().borrow_mut().insert((self.ty, variant_index), llty);
-
-        if let Some((mut llty, layout)) = defer {
-            llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed())
-        }
-
-        llty
+    if t != t_norm {
+        let llty = in_memory_type_of(cx, t_norm);
+        debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty);
+        cx.lltypes().borrow_mut().insert(t, llty);
+        return llty;
     }
 
-    fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
-        if let layout::Abi::Scalar(ref scalar) = self.abi {
-            if scalar.is_bool() {
-                return Type::i1(ccx);
+    let ptr_ty = |ty: Ty<'tcx>| {
+        if cx.shared().type_has_metadata(ty) {
+            if let ty::TyStr = ty.sty {
+                // This means we get a nicer name in the output (str is always
+                // unsized).
+                cx.str_slice_type()
+            } else {
+                let ptr_ty = in_memory_type_of(cx, ty).ptr_to();
+                let info_ty = unsized_info_ty(cx, ty);
+                Type::struct_(cx, &[ptr_ty, info_ty], false)
             }
+        } else {
+            in_memory_type_of(cx, ty).ptr_to()
         }
-        self.llvm_type(ccx)
-    }
+    };
 
-    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
-                                         index: usize) -> Type {
-        // HACK(eddyb) special-case fat pointers until LLVM removes
-        // pointee types, to avoid bitcasting every `OperandRef::deref`.
-        match self.ty.sty {
-            ty::TyRef(..) |
-            ty::TyRawPtr(_) => {
-                return self.field(ccx, index).llvm_type(ccx);
-            }
-            ty::TyAdt(def, _) if def.is_box() => {
-                let ptr_ty = ccx.tcx().mk_mut_ptr(self.ty.boxed_ty());
-                return ccx.layout_of(ptr_ty).scalar_pair_element_llvm_type(ccx, index);
-            }
-            _ => {}
-        }
+    let mut llty = match t.sty {
+      ty::TyBool => Type::bool(cx),
+      ty::TyChar => Type::char(cx),
+      ty::TyInt(t) => Type::int_from_ty(cx, t),
+      ty::TyUint(t) => Type::uint_from_ty(cx, t),
+      ty::TyFloat(t) => Type::float_from_ty(cx, t),
+      ty::TyNever => Type::nil(cx),
+      ty::TyClosure(..) => {
+          // Only create the named struct, but don't fill it in. We
+          // fill it in *after* placing it into the type cache.
+          adt::incomplete_type_of(cx, t, "closure")
+      }
+      ty::TyGenerator(..) => {
+          // Only create the named struct, but don't fill it in. We
+          // fill it in *after* placing it into the type cache.
+          adt::incomplete_type_of(cx, t, "generator")
+      }
+
+      ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+      ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
+          ptr_ty(ty)
+      }
+      ty::TyAdt(def, _) if def.is_box() => {
+          ptr_ty(t.boxed_ty())
+      }
+
+      ty::TyArray(ty, size) => {
+          let llty = in_memory_type_of(cx, ty);
+          let size = size.val.to_const_int().unwrap().to_u64().unwrap();
+          Type::array(&llty, size)
+      }
+
+      // Unsized slice types (and str) have the type of their element, and
+      // traits have the type of u8. This is so that the data pointer inside
+      // fat pointers is of the right type (e.g. for array accesses), even
+      // when taking the address of an unsized field in a struct.
+      ty::TySlice(ty) => in_memory_type_of(cx, ty),
+      ty::TyStr | ty::TyDynamic(..) | ty::TyForeign(..) => Type::i8(cx),
+
+      ty::TyFnDef(..) => Type::nil(cx),
+      ty::TyFnPtr(sig) => {
+        let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig);
+        FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to()
+      }
+      ty::TyTuple(ref tys, _) if tys.is_empty() => Type::nil(cx),
+      ty::TyTuple(..) => {
+          adt::type_of(cx, t)
+      }
+      ty::TyAdt(..) if t.is_simd() => {
+          let e = t.simd_type(cx.tcx());
+          if !e.is_machine() {
+              cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \
+                                        a non-machine element type `{}`",
+                                       t, e))
+          }
+          let llet = in_memory_type_of(cx, e);
+          let n = t.simd_size(cx.tcx()) as u64;
+          Type::vector(&llet, n)
+      }
+      ty::TyAdt(..) => {
+          // Only create the named struct, but don't fill it in. We
+          // fill it in *after* placing it into the type cache. This
+          // avoids creating more than one copy of the enum when one
+          // of the enum's variants refers to the enum itself.
+          let name = llvm_type_name(cx, t);
+          adt::incomplete_type_of(cx, t, &name[..])
+      }
+
+      ty::TyInfer(..) |
+      ty::TyProjection(..) |
+      ty::TyParam(..) |
+      ty::TyAnon(..) |
+      ty::TyError => bug!("type_of with {:?}", t),
+    };
 
-        let (a, b) = match self.abi {
-            layout::Abi::ScalarPair(ref a, ref b) => (a, b),
-            _ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self)
-        };
-        let scalar = [a, b][index];
+    debug!("--> mapped t={:?} to llty={:?}", t, llty);
 
-        // Make sure to return the same type `immediate_llvm_type` would,
-        // to avoid dealing with two types and the associated conversions.
-        // This means that `(bool, bool)` is represented as `{i1, i1}`,
-        // both in memory and as an immediate, while `bool` is typically
-        // `i8` in memory and only `i1` when immediate. While we need to
-        // load/store `bool` as `i8` to avoid crippling LLVM optimizations,
-        // `i1` in a LLVM aggregate is valid and mostly equivalent to `i8`.
-        if scalar.is_bool() {
-            return Type::i1(ccx);
-        }
+    cx.lltypes().borrow_mut().insert(t, llty);
 
-        match scalar.value {
-            layout::Int(i, _) => Type::from_integer(ccx, i),
-            layout::F32 => Type::f32(ccx),
-            layout::F64 => Type::f64(ccx),
-            layout::Pointer => {
-                // If we know the alignment, pick something better than i8.
-                let offset = if index == 0 {
-                    Size::from_bytes(0)
-                } else {
-                    a.value.size(ccx).abi_align(b.value.align(ccx))
-                };
-                let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) {
-                    Type::pointee_for_abi_align(ccx, pointee.align)
-                } else {
-                    Type::i8(ccx)
-                };
-                pointee.ptr_to()
-            }
+    // If this was an enum or struct, fill in the type now.
+    match t.sty {
+        ty::TyAdt(..) | ty::TyClosure(..) | ty::TyGenerator(..) if !t.is_simd() && !t.is_box() => {
+            adt::finish_type_of(cx, t, &mut llty);
         }
+        _ => ()
     }
 
-    fn llvm_field_index(&self, index: usize) -> u64 {
-        match self.abi {
-            layout::Abi::Scalar(_) |
-            layout::Abi::ScalarPair(..) => {
-                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
-            }
-            _ => {}
-        }
-        match self.fields {
-            layout::FieldPlacement::Union(_) => {
-                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
-            }
-
-            layout::FieldPlacement::Array { .. } => {
-                index as u64
-            }
+    llty
+}
 
-            layout::FieldPlacement::Arbitrary { .. } => {
-                1 + (self.fields.memory_index(index) as u64) * 2
-            }
-        }
+impl<'a, 'tcx> CrateContext<'a, 'tcx> {
+    pub fn align_of(&self, ty: Ty<'tcx>) -> machine::llalign {
+        self.layout_of(ty).align(self).abi() as machine::llalign
     }
 
-    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
-                           -> Option<PointeeInfo> {
-        if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) {
-            return pointee;
-        }
-
-        let mut result = None;
-        match self.ty.sty {
-            ty::TyRawPtr(mt) if offset.bytes() == 0 => {
-                let (size, align) = ccx.size_and_align_of(mt.ty);
-                result = Some(PointeeInfo {
-                    size,
-                    align,
-                    safe: None
-                });
-            }
-
-            ty::TyRef(_, mt) if offset.bytes() == 0 => {
-                let (size, align) = ccx.size_and_align_of(mt.ty);
-
-                let kind = match mt.mutbl {
-                    hir::MutImmutable => if ccx.shared().type_is_freeze(mt.ty) {
-                        PointerKind::Frozen
-                    } else {
-                        PointerKind::Shared
-                    },
-                    hir::MutMutable => {
-                        if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias ||
-                           ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort {
-                            PointerKind::UniqueBorrowed
-                        } else {
-                            PointerKind::Shared
-                        }
-                    }
-                };
-
-                result = Some(PointeeInfo {
-                    size,
-                    align,
-                    safe: Some(kind)
-                });
-            }
-
-            _ => {
-                let mut data_variant = match self.variants {
-                    layout::Variants::NicheFilling { dataful_variant, .. } => {
-                        // Only the niche itself is always initialized,
-                        // so only check for a pointer at its offset.
-                        //
-                        // If the niche is a pointer, it's either valid
-                        // (according to its type), or null (which the
-                        // niche field's scalar validity range encodes).
-                        // This allows using `dereferenceable_or_null`
-                        // for e.g. `Option<&T>`, and this will continue
-                        // to work as long as we don't start using more
-                        // niches than just null (e.g. the first page
-                        // of the address space, or unaligned pointers).
-                        if self.fields.offset(0) == offset {
-                            Some(self.for_variant(ccx, dataful_variant))
-                        } else {
-                            None
-                        }
-                    }
-                    _ => Some(*self)
-                };
-
-                if let Some(variant) = data_variant {
-                    // We're not interested in any unions.
-                    if let layout::FieldPlacement::Union(_) = variant.fields {
-                        data_variant = None;
-                    }
-                }
-
-                if let Some(variant) = data_variant {
-                    let ptr_end = offset + layout::Pointer.size(ccx);
-                    for i in 0..variant.fields.count() {
-                        let field_start = variant.fields.offset(i);
-                        if field_start <= offset {
-                            let field = variant.field(ccx, i);
-                            if ptr_end <= field_start + field.size {
-                                // We found the right field, look inside it.
-                                result = field.pointee_info_at(ccx, offset - field_start);
-                                break;
-                            }
-                        }
-                    }
-                }
+    pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize {
+        self.layout_of(ty).size(self).bytes() as machine::llsize
+    }
 
-                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
-                if let Some(ref mut pointee) = result {
-                    if let ty::TyAdt(def, _) = self.ty.sty {
-                        if def.is_box() && offset.bytes() == 0 {
-                            pointee.safe = Some(PointerKind::UniqueOwned);
-                        }
-                    }
-                }
-            }
+    pub fn over_align_of(&self, t: Ty<'tcx>) -> Option<machine::llalign> {
+        let layout = self.layout_of(t);
+        if let Some(align) = layout.over_align(&self.tcx().data_layout) {
+            Some(align as machine::llalign)
+        } else {
+            None
         }
-
-        ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result);
-        result
     }
 }
+
+fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String {
+    let mut name = String::with_capacity(32);
+    let printer = DefPathBasedNames::new(cx.tcx(), true, true);
+    printer.push_type_name(ty, &mut name);
+    name
+}
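
Taken together, `type_of` and `in_memory_type_of` implement a mapping along these lines (assuming a 64-bit target; illustrative, not exhaustive):

    // in_memory_type_of(cx, str)   == i8            ("minimal unit")
    // in_memory_type_of(cx, [u32]) == i32
    // type_of(cx, &u8)             == i8*                    (thin pointer)
    // type_of(cx, &str)            == { i8*, i64 }           (fat pointer)
    // type_of(cx, &Trait)          == { i8*, void (i8*)** }  (data + vtable)
    // type_of(cx, str)             == { i8*, i64 }           (unsized value => fat pointer)
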
diff --git a/src/librustc_trans_utils/monomorphize.rs b/src/librustc_trans_utils/monomorphize.rs
index eee5c1d9ef2..ab61dacf010 100644
--- a/src/librustc_trans_utils/monomorphize.rs
+++ b/src/librustc_trans_utils/monomorphize.rs
@@ -12,7 +12,7 @@ use rustc::hir::def_id::DefId;
 use rustc::middle::lang_items::DropInPlaceFnLangItem;
 use rustc::traits;
 use rustc::ty::adjustment::CustomCoerceUnsized;
-use rustc::ty::subst::{Kind, Subst};
+use rustc::ty::subst::{Kind, Subst, Substs};
 use rustc::ty::{self, Ty, TyCtxt};
 
 pub use rustc::ty::Instance;
@@ -125,3 +125,12 @@ pub fn custom_coerce_unsize_info<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     }
 }
 
+/// Returns the normalized type of a struct field
+pub fn field_ty<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+                          param_substs: &Substs<'tcx>,
+                          f: &'tcx ty::FieldDef)
+                          -> Ty<'tcx>
+{
+    tcx.fully_normalize_associated_types_in(&f.ty(tcx, param_substs))
+}
+
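
The `field_ty` helper added here substitutes an instance's `Substs` into a field's declared type and normalizes away any associated-type projections. A surface-Rust analogue of what that normalization produces (the `Mirror` trait here is illustrative only, not compiler internals):

```rust
trait Mirror { type It; }
impl Mirror for u8 { type It = u8; }

// The field's declared type is the projection `<T as Mirror>::It`.
struct Wrap<T: Mirror>(<T as Mirror>::It);

fn main() {
    // After substituting `T = u8`, normalization resolves the
    // projection to the concrete type `u8`.
    let Wrap(x): Wrap<u8> = Wrap(5u8);
    assert_eq!(x, 5u8);
}
```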
diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp
index 9aa172591b8..20ea8d70302 100644
--- a/src/rustllvm/RustWrapper.cpp
+++ b/src/rustllvm/RustWrapper.cpp
@@ -178,22 +178,6 @@ extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned Index,
 #endif
 }
 
-extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr,
-                                                 unsigned Index,
-                                                 uint32_t Bytes) {
-  CallSite Call = CallSite(unwrap<Instruction>(Instr));
-  AttrBuilder B;
-  B.addAlignmentAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
-  Call.setAttributes(Call.getAttributes().addAttributes(
-      Call->getContext(), Index, B));
-#else
-  Call.setAttributes(Call.getAttributes().addAttributes(
-      Call->getContext(), Index,
-      AttributeSet::get(Call->getContext(), Index, B)));
-#endif
-}
-
 extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
                                                        unsigned Index,
                                                        uint64_t Bytes) {
@@ -210,22 +194,6 @@ extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr,
 #endif
 }
 
-extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr,
-                                                             unsigned Index,
-                                                             uint64_t Bytes) {
-  CallSite Call = CallSite(unwrap<Instruction>(Instr));
-  AttrBuilder B;
-  B.addDereferenceableOrNullAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
-  Call.setAttributes(Call.getAttributes().addAttributes(
-      Call->getContext(), Index, B));
-#else
-  Call.setAttributes(Call.getAttributes().addAttributes(
-      Call->getContext(), Index,
-      AttributeSet::get(Call->getContext(), Index, B)));
-#endif
-}
-
 extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
                                              LLVMRustAttribute RustAttr) {
   Function *A = unwrap<Function>(Fn);
@@ -238,19 +206,6 @@ extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index,
 #endif
 }
 
-extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn,
-                                         unsigned Index,
-                                         uint32_t Bytes) {
-  Function *A = unwrap<Function>(Fn);
-  AttrBuilder B;
-  B.addAlignmentAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
-  A->addAttributes(Index, B);
-#else
-  A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
-#endif
-}
-
 extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index,
                                                uint64_t Bytes) {
   Function *A = unwrap<Function>(Fn);
@@ -263,19 +218,6 @@ extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index,
 #endif
 }
 
-extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn,
-                                                     unsigned Index,
-                                                     uint64_t Bytes) {
-  Function *A = unwrap<Function>(Fn);
-  AttrBuilder B;
-  B.addDereferenceableOrNullAttr(Bytes);
-#if LLVM_VERSION_GE(5, 0)
-  A->addAttributes(Index, B);
-#else
-  A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B));
-#endif
-}
-
 extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn,
                                                    unsigned Index,
                                                    const char *Name,
@@ -315,18 +257,21 @@ extern "C" void LLVMRustSetHasUnsafeAlgebra(LLVMValueRef V) {
 
 extern "C" LLVMValueRef
 LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
-                        LLVMAtomicOrdering Order) {
+                        LLVMAtomicOrdering Order, unsigned Alignment) {
   LoadInst *LI = new LoadInst(unwrap(Source), 0);
   LI->setAtomic(fromRust(Order));
+  LI->setAlignment(Alignment);
   return wrap(unwrap(B)->Insert(LI, Name));
 }
 
 extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B,
                                                  LLVMValueRef V,
                                                  LLVMValueRef Target,
-                                                 LLVMAtomicOrdering Order) {
+                                                 LLVMAtomicOrdering Order,
+                                                 unsigned Alignment) {
   StoreInst *SI = new StoreInst(unwrap(V), unwrap(Target));
   SI->setAtomic(fromRust(Order));
+  SI->setAlignment(Alignment);
   return wrap(unwrap(B)->Insert(SI));
 }
 
diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs
index 2b35d454739..342a4f0d085 100644
--- a/src/test/codegen/adjustments.rs
+++ b/src/test/codegen/adjustments.rs
@@ -9,7 +9,6 @@
 // except according to those terms.
 
 // compile-flags: -C no-prepopulate-passes
-// ignore-tidy-linelength
 
 #![crate_type = "lib"]
 
@@ -24,9 +23,9 @@ pub fn helper(_: usize) {
 pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
     // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
     // check that we copy directly to the return value slot
-// CHECK: %0 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.0, 0
-// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } %0, [[USIZE]] %x.1, 1
-// CHECK: ret { [0 x i8]*, [[USIZE]] } %1
+// CHECK: %0 = insertvalue { i8*, [[USIZE]] } undef, i8* %x.ptr, 0
+// CHECK: %1 = insertvalue { i8*, [[USIZE]] } %0, [[USIZE]] %x.meta, 1
+// CHECK: ret { i8*, [[USIZE]] } %1
     { x }
 }
 
diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs
index a75b8f3992d..33b4221b733 100644
--- a/src/test/codegen/consts.rs
+++ b/src/test/codegen/consts.rs
@@ -54,7 +54,7 @@ pub fn inline_enum_const() -> E<i8, i16> {
 #[no_mangle]
 pub fn low_align_const() -> E<i16, [i16; 3]> {
 // Check that low_align_const and high_align_const use the same constant
-// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]]
+// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]]
     *&E::A(0)
 }
 
@@ -62,6 +62,6 @@ pub fn low_align_const() -> E<i16, [i16; 3]> {
 #[no_mangle]
 pub fn high_align_const() -> E<i16, i32> {
 // Check that low_align_const and high_align_const use the same constant
-// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]]
+// CHECK: load {{.*}} bitcast ({ i16, i16, [4 x i8] }** [[LOW_HIGH_REF]]
     *&E::A(0)
 }
diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs
index f8945a6ee8d..29e2840c881 100644
--- a/src/test/codegen/function-arguments.rs
+++ b/src/test/codegen/function-arguments.rs
@@ -9,13 +9,12 @@
 // except according to those terms.
 
 // compile-flags: -C no-prepopulate-passes
-// ignore-tidy-linelength
 
 #![crate_type = "lib"]
 #![feature(custom_attribute)]
 
 pub struct S {
-  _field: [i32; 8],
+  _field: [i64; 4],
 }
 
 pub struct UnsafeInner {
@@ -46,13 +45,13 @@ pub fn static_borrow(_: &'static i32) {
 pub fn named_borrow<'r>(_: &'r i32) {
 }
 
-// CHECK: @unsafe_borrow(i16* dereferenceable(2) %arg0)
+// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_borrow(_: &UnsafeInner) {
 }
 
-// CHECK: @mutable_unsafe_borrow(i16* dereferenceable(2) %arg0)
+// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
 // ... unless this is a mutable borrow, those never alias
 // ... except that there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
@@ -77,7 +76,7 @@ pub fn indirect_struct(_: S) {
 pub fn borrowed_struct(_: &S) {
 }
 
-// CHECK: noalias align 4 dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x)
+// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x)
 #[no_mangle]
 pub fn _box(x: Box<i32>) -> Box<i32> {
   x
@@ -87,7 +86,7 @@ pub fn _box(x: Box<i32>) -> Box<i32> {
 #[no_mangle]
 pub fn struct_return() -> S {
   S {
-    _field: [0, 0, 0, 0, 0, 0, 0, 0]
+    _field: [0, 0, 0, 0]
   }
 }
 
@@ -97,43 +96,43 @@ pub fn struct_return() -> S {
 pub fn helper(_: usize) {
 }
 
-// CHECK: @slice([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1)
+// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 pub fn slice(_: &[u8]) {
 }
 
-// CHECK: @mutable_slice([0 x i8]* nonnull %arg0.0, [[USIZE]] %arg0.1)
+// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 // ... there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
 pub fn mutable_slice(_: &mut [u8]) {
 }
 
-// CHECK: @unsafe_slice([0 x i16]* nonnull %arg0.0, [[USIZE]] %arg0.1)
+// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_slice(_: &[UnsafeInner]) {
 }
 
-// CHECK: @str([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1)
+// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 pub fn str(_: &[u8]) {
 }
 
-// CHECK: @trait_borrow(%"core::ops::drop::Drop"* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1)
+// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 pub fn trait_borrow(_: &Drop) {
 }
 
-// CHECK: @trait_box(%"core::ops::drop::Drop"* noalias nonnull, {}* noalias nonnull readonly)
+// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly)
 #[no_mangle]
 pub fn trait_box(_: Box<Drop>) {
 }
 
-// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
+// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
 #[no_mangle]
 pub fn return_slice(x: &[u16]) -> &[u16] {
   x
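
These CHECK updates track the reverted slice ABI: a `&[u8]` is still split into two scalar arguments, but they go back to the `%arg0.ptr`/`%arg0.meta` naming with a thin `i8*` data pointer instead of `[0 x i8]*`. A quick illustration of the two halves of a fat pointer, outside the test suite:

```rust
fn main() {
    // A slice reference is a fat pointer: data pointer plus length.
    let s: &[u8] = &[1, 2, 3];
    let ptr: *const u8 = s.as_ptr(); // the `.ptr` half
    let len: usize = s.len();        // the `.meta` half
    assert_eq!(len, 3);
    assert_eq!(unsafe { *ptr.add(2) }, 3);
}
```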
diff --git a/src/test/codegen/issue-32031.rs b/src/test/codegen/issue-32031.rs
index e5ec1738545..5d3ccbfa4ce 100644
--- a/src/test/codegen/issue-32031.rs
+++ b/src/test/codegen/issue-32031.rs
@@ -15,7 +15,7 @@
 #[no_mangle]
 pub struct F32(f32);
 
-// CHECK: define float @add_newtype_f32(float %a, float %b)
+// CHECK: define float @add_newtype_f32(float, float)
 #[inline(never)]
 #[no_mangle]
 pub fn add_newtype_f32(a: F32, b: F32) -> F32 {
@@ -25,7 +25,7 @@ pub fn add_newtype_f32(a: F32, b: F32) -> F32 {
 #[no_mangle]
 pub struct F64(f64);
 
-// CHECK: define double @add_newtype_f64(double %a, double %b)
+// CHECK: define double @add_newtype_f64(double, double)
 #[inline(never)]
 #[no_mangle]
 pub fn add_newtype_f64(a: F64, b: F64) -> F64 {
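
The updated CHECK lines drop the parameter names but keep the property under test: a one-field tuple struct wrapping a float is passed and returned as a bare `float`/`double`. A sketch of the `f32` case (the function body is written from the assertion, not copied from the test file):

```rust
#[derive(Copy, Clone, PartialEq, Debug)]
struct F32(f32);

#[inline(never)]
fn add_newtype_f32(a: F32, b: F32) -> F32 {
    // Lowers to `define float @add_newtype_f32(float, float)`.
    F32(a.0 + b.0)
}

fn main() {
    assert_eq!(add_newtype_f32(F32(1.5), F32(2.5)), F32(4.0));
}
```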
diff --git a/src/test/codegen/link_section.rs b/src/test/codegen/link_section.rs
index 1879002e7f3..98214dc5c6f 100644
--- a/src/test/codegen/link_section.rs
+++ b/src/test/codegen/link_section.rs
@@ -22,12 +22,12 @@ pub enum E {
     B(f32)
 }
 
-// CHECK: @VAR2 = constant {{.*}}, section ".test_two"
+// CHECK: @VAR2 = constant {{.*}} { i32 0, i32 666 }, section ".test_two"
 #[no_mangle]
 #[link_section = ".test_two"]
 pub static VAR2: E = E::A(666);
 
-// CHECK: @VAR3 = constant {{.*}}, section ".test_three"
+// CHECK: @VAR3 = constant {{.*}} { i32 1, float 1.000000e+00 }, section ".test_three"
 #[no_mangle]
 #[link_section = ".test_three"]
 pub static VAR3: E = E::B(1.);
diff --git a/src/test/codegen/match-optimizes-away.rs b/src/test/codegen/match-optimizes-away.rs
index d7b77937431..c0f2f64f82c 100644
--- a/src/test/codegen/match-optimizes-away.rs
+++ b/src/test/codegen/match-optimizes-away.rs
@@ -12,9 +12,11 @@
 // compile-flags: -O
 #![crate_type="lib"]
 
-pub enum Three { A, B, C }
+pub enum Three { First, Second, Third }
+use Three::*;
 
-pub enum Four { A, B, C, D }
+pub enum Four { First, Second, Third, Fourth }
+use Four::*;
 
 #[no_mangle]
 pub fn three_valued(x: Three) -> Three {
@@ -22,9 +24,9 @@ pub fn three_valued(x: Three) -> Three {
     // CHECK-NEXT: {{^.*:$}}
     // CHECK-NEXT: ret i8 %0
     match x {
-        Three::A => Three::A,
-        Three::B => Three::B,
-        Three::C => Three::C,
+        First => First,
+        Second => Second,
+        Third => Third,
     }
 }
 
@@ -34,9 +36,9 @@ pub fn four_valued(x: Four) -> Four {
     // CHECK-NEXT: {{^.*:$}}
     // CHECK-NEXT: ret i8 %0
     match x {
-        Four::A => Four::A,
-        Four::B => Four::B,
-        Four::C => Four::C,
-        Four::D => Four::D,
+        First => First,
+        Second => Second,
+        Third => Third,
+        Fourth => Fourth,
     }
 }
diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs
index dd530cf03cd..99e6e38a3bf 100644
--- a/src/test/codegen/packed.rs
+++ b/src/test/codegen/packed.rs
@@ -54,6 +54,9 @@ pub struct PackedPair(u8, u32);
 // CHECK-LABEL: @pkd_pair
 #[no_mangle]
 pub fn pkd_pair(pair1: &mut PackedPair, pair2: &mut PackedPair) {
-// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 5, i32 1, i1 false)
+    // CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* %{{.*}}, align 1
+    // CHECK: [[V2:%[a-z0-9]+]] = load i32, i32* %{{.*}}, align 1
+    // CHECK: store i8 [[V1]], i8* {{.*}}, align 1
+    // CHECK: store i32 [[V2]], i32* {{.*}}, align 1
     *pair2 = *pair1;
 }
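
The new CHECK lines expect `*pair2 = *pair1` to lower to align-1, field-by-field loads and stores rather than one 5-byte memcpy. The underlying property is that `#[repr(packed)]` removes all padding and caps the struct's alignment at 1, so no access may assume natural alignment. A stand-alone sketch:

```rust
#[repr(packed)]
#[derive(Copy, Clone)]
struct PackedPair(u8, u32);

fn main() {
    // No padding between the u8 and the u32; alignment 1 overall.
    assert_eq!(std::mem::size_of::<PackedPair>(), 5);
    assert_eq!(std::mem::align_of::<PackedPair>(), 1);

    let a = PackedPair(1, 0xDEAD_BEEF);
    let b = a; // copied with unaligned (align-1) accesses

    // Copy the field out before using it; a reference into a packed
    // struct could be misaligned.
    let field = { b.1 };
    assert_eq!(field, 0xDEAD_BEEF);
}
```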
diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs
index 6c00ffa754b..4b713e28b05 100644
--- a/src/test/codegen/refs.rs
+++ b/src/test/codegen/refs.rs
@@ -9,7 +9,6 @@
 // except according to those terms.
 
 // compile-flags: -C no-prepopulate-passes
-// ignore-tidy-linelength
 
 #![crate_type = "lib"]
 
@@ -24,10 +23,10 @@ pub fn helper(_: usize) {
 pub fn ref_dst(s: &[u8]) {
     // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
     // directly to the alloca for "x"
-// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0
-// CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]]
-// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1
-// CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]]
+// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 0
+// CHECK: store i8* %s.ptr, i8** [[X0]]
+// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { i8*, [[USIZE]] }* %x, i32 0, i32 1
+// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]]
 
     let x = &*s;
     &x; // keep variable in an alloca
diff --git a/src/test/codegen/slice-init.rs b/src/test/codegen/slice-init.rs
index 915db493fc2..569d937c812 100644
--- a/src/test/codegen/slice-init.rs
+++ b/src/test/codegen/slice-init.rs
@@ -15,7 +15,7 @@
 // CHECK-LABEL: @zero_sized_elem
 #[no_mangle]
 pub fn zero_sized_elem() {
-    // CHECK-NOT: br label %repeat_loop_header{{.*}}
+    // CHECK-NOT: br label %slice_loop_header{{.*}}
     // CHECK-NOT: call void @llvm.memset.p0i8
     let x = [(); 4];
     drop(&x);
@@ -24,7 +24,7 @@ pub fn zero_sized_elem() {
 // CHECK-LABEL: @zero_len_array
 #[no_mangle]
 pub fn zero_len_array() {
-    // CHECK-NOT: br label %repeat_loop_header{{.*}}
+    // CHECK-NOT: br label %slice_loop_header{{.*}}
     // CHECK-NOT: call void @llvm.memset.p0i8
     let x = [4; 0];
     drop(&x);
@@ -34,7 +34,7 @@ pub fn zero_len_array() {
 #[no_mangle]
 pub fn byte_array() {
     // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 7, i[[WIDTH]] 4
-    // CHECK-NOT: br label %repeat_loop_header{{.*}}
+    // CHECK-NOT: br label %slice_loop_header{{.*}}
     let x = [7u8; 4];
     drop(&x);
 }
@@ -50,7 +50,7 @@ enum Init {
 #[no_mangle]
 pub fn byte_enum_array() {
     // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 {{.*}}, i[[WIDTH]] 4
-    // CHECK-NOT: br label %repeat_loop_header{{.*}}
+    // CHECK-NOT: br label %slice_loop_header{{.*}}
     let x = [Init::Memset; 4];
     drop(&x);
 }
@@ -59,7 +59,7 @@ pub fn byte_enum_array() {
 #[no_mangle]
 pub fn zeroed_integer_array() {
     // CHECK: call void @llvm.memset.p0i8.i[[WIDTH:[0-9]+]](i8* {{.*}}, i8 0, i[[WIDTH]] 16
-    // CHECK-NOT: br label %repeat_loop_header{{.*}}
+    // CHECK-NOT: br label %slice_loop_header{{.*}}
     let x = [0u32; 4];
     drop(&x);
 }
@@ -67,7 +67,7 @@ pub fn zeroed_integer_array() {
 // CHECK-LABEL: @nonzero_integer_array
 #[no_mangle]
 pub fn nonzero_integer_array() {
-    // CHECK: br label %repeat_loop_header{{.*}}
+    // CHECK: br label %slice_loop_header{{.*}}
     // CHECK-NOT: call void @llvm.memset.p0i8
     let x = [0x1a_2b_3c_4d_u32; 4];
     drop(&x);
diff --git a/src/test/run-make/issue-25581/test.c b/src/test/run-make/issue-25581/test.c
index 5736b173021..ab85d2bb13f 100644
--- a/src/test/run-make/issue-25581/test.c
+++ b/src/test/run-make/issue-25581/test.c
@@ -2,15 +2,10 @@
 #include <stddef.h>
 #include <stdint.h>
 
-struct ByteSlice {
-        uint8_t *data;
-        size_t len;
-};
-
-size_t slice_len(struct ByteSlice bs) {
-        return bs.len;
+size_t slice_len(uint8_t *data, size_t len) {
+    return len;
 }
 
-uint8_t slice_elem(struct ByteSlice bs, size_t idx) {
-        return bs.data[idx];
+uint8_t slice_elem(uint8_t *data, size_t len, size_t idx) {
+    return data[idx];
 }
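
With the revert, a Rust `&[u8]` argument once again reaches C as two scalar parameters instead of a by-value struct, which is why the C helpers above change signature. A hedged sketch of a matching Rust caller written in the portable pointer-plus-length form (the declarations are illustrative, not the run-make test's actual Rust source, and the program links against the C file above):

```rust
extern "C" {
    fn slice_len(data: *const u8, len: usize) -> usize;
    fn slice_elem(data: *const u8, len: usize, idx: usize) -> u8;
}

fn main() {
    let v = [10u8, 20, 30];
    unsafe {
        // The slice is passed as its two components explicitly.
        assert_eq!(slice_len(v.as_ptr(), v.len()), 3);
        assert_eq!(slice_elem(v.as_ptr(), v.len(), 1), 20);
    }
}
```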
diff --git a/src/test/run-pass/enum-discrim-manual-sizing.rs b/src/test/run-pass/enum-discrim-manual-sizing.rs
index 8557c065dc6..3bbc107e0b9 100644
--- a/src/test/run-pass/enum-discrim-manual-sizing.rs
+++ b/src/test/run-pass/enum-discrim-manual-sizing.rs
@@ -108,9 +108,6 @@ pub fn main() {
     let array_expected_size = round_up(28, align_of::<Eu64NonCLike<[u32; 5]>>());
     assert_eq!(size_of::<Eu64NonCLike<[u32; 5]>>(), array_expected_size);
     assert_eq!(size_of::<Eu64NonCLike<[u32; 6]>>(), 32);
-
-    assert_eq!(align_of::<Eu32>(), align_of::<u32>());
-    assert_eq!(align_of::<Eu64NonCLike<u8>>(), align_of::<u64>());
 }
 
 // Rounds x up to the next multiple of a
diff --git a/src/test/run-pass/enum-univariant-repr.rs b/src/test/run-pass/enum-univariant-repr.rs
index 17d614b5496..ef4cc60bf0d 100644
--- a/src/test/run-pass/enum-univariant-repr.rs
+++ b/src/test/run-pass/enum-univariant-repr.rs
@@ -22,11 +22,6 @@ enum UnivariantWithoutDescr {
     Y
 }
 
-#[repr(u8)]
-enum UnivariantWithData {
-    Z(u8),
-}
-
 pub fn main() {
     {
         assert_eq!(4, mem::size_of::<Univariant>());
@@ -49,12 +44,4 @@ pub fn main() {
         // check it has the same memory layout as u16
         assert_eq!(&[descr, descr, descr], ints);
     }
-
-    {
-        assert_eq!(2, mem::size_of::<UnivariantWithData>());
-
-        match UnivariantWithData::Z(4) {
-            UnivariantWithData::Z(x) => assert_eq!(x, 4),
-        }
-    }
 }
diff --git a/src/test/ui/print_type_sizes/uninhabited.rs b/src/test/run-pass/issue-30276.rs
index 69cc4c93360..5dd0cd8ba53 100644
--- a/src/test/ui/print_type_sizes/uninhabited.rs
+++ b/src/test/run-pass/issue-30276.rs
@@ -1,4 +1,4 @@
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
 // file at the top-level directory of this distribution and at
 // http://rust-lang.org/COPYRIGHT.
 //
@@ -8,11 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// compile-flags: -Z print-type-sizes
-
-#![feature(never_type)]
-
-pub fn main() {
-    let _x: Option<!> = None;
-    let _y: Result<u32, !> = Ok(42);
+struct Test([i32]);
+fn main() {
+    let _x: fn(_) -> Test = Test;
 }
diff --git a/src/test/run-pass/packed-struct-optimized-enum.rs b/src/test/run-pass/packed-struct-optimized-enum.rs
deleted file mode 100644
index 1179f16daa2..00000000000
--- a/src/test/run-pass/packed-struct-optimized-enum.rs
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[repr(packed)]
-#[derive(Copy, Clone)]
-struct Packed<T>(T);
-
-fn main() {
-    let one = (Some(Packed((&(), 0))), true);
-    let two = [one, one];
-    let stride = (&two[1] as *const _ as usize) - (&two[0] as *const _ as usize);
-
-    // This can fail if rustc and LLVM disagree on the size of a type.
-    // In this case, `Option<Packed<(&(), u32)>>` was erroneously not
-    // marked as packed despite needing alignment `1` and containing
-    // its `&()` discriminant, which has alignment larger than `1`.
-    assert_eq!(stride, std::mem::size_of_val(&one));
-}
diff --git a/src/test/compile-fail/issue-26548.rs b/src/test/ui/issue-26548.rs
index 39c6e97268f..2591d7bcbae 100644
--- a/src/test/compile-fail/issue-26548.rs
+++ b/src/test/ui/issue-26548.rs
@@ -8,10 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-// error-pattern: unsupported cyclic reference between types/traits detected
-// note-pattern: the cycle begins when computing layout of
-// note-pattern: ...which then requires computing layout of
-// note-pattern: ...which then again requires computing layout of
+// error-pattern: overflow representing the type
 
 
 trait Mirror { type It: ?Sized; }
diff --git a/src/test/ui/issue-26548.stderr b/src/test/ui/issue-26548.stderr
new file mode 100644
index 00000000000..8bfe4ac733b
--- /dev/null
+++ b/src/test/ui/issue-26548.stderr
@@ -0,0 +1,9 @@
+error[E0391]: unsupported cyclic reference between types/traits detected
+  |
+note: the cycle begins when computing layout of `S`...
+note: ...which then requires computing layout of `std::option::Option<<S as Mirror>::It>`...
+note: ...which then requires computing layout of `<S as Mirror>::It`...
+  = note: ...which then again requires computing layout of `S`, completing the cycle.
+
+error: aborting due to previous error
+
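
For reference, the shape of the type behind this cycle, reconstructed from the `Mirror` trait visible in the test and the notes in the error: computing the layout of `S` requires the layout of its field, which normalizes back to `S` itself. This program is rejected with the E0391 error shown above rather than compiling:

```rust
trait Mirror { type It: ?Sized; }
impl<T: ?Sized> Mirror for T { type It = Self; }

// Layout of `S` needs layout of `Option<<S as Mirror>::It>`,
// and `<S as Mirror>::It` normalizes to `S`: a cycle.
struct S(Option<<S as Mirror>::It>);

fn main() {
    let _s = S(None);
}
```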
diff --git a/src/test/ui/print_type_sizes/niche-filling.stdout b/src/test/ui/print_type_sizes/niche-filling.stdout
deleted file mode 100644
index af3e89a936e..00000000000
--- a/src/test/ui/print_type_sizes/niche-filling.stdout
+++ /dev/null
@@ -1,80 +0,0 @@
-print-type-size type: `IndirectNonZero<u32>`: 12 bytes, alignment: 4 bytes
-print-type-size     field `.nested`: 8 bytes
-print-type-size     field `.post`: 2 bytes
-print-type-size     field `.pre`: 1 bytes
-print-type-size     end padding: 1 bytes
-print-type-size type: `MyOption<IndirectNonZero<u32>>`: 12 bytes, alignment: 4 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Some`: 12 bytes
-print-type-size         field `.0`: 12 bytes
-print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Record`: 7 bytes
-print-type-size         field `.val`: 4 bytes
-print-type-size         field `.post`: 2 bytes
-print-type-size         field `.pre`: 1 bytes
-print-type-size     end padding: 1 bytes
-print-type-size type: `NestedNonZero<u32>`: 8 bytes, alignment: 4 bytes
-print-type-size     field `.val`: 4 bytes
-print-type-size     field `.post`: 2 bytes
-print-type-size     field `.pre`: 1 bytes
-print-type-size     end padding: 1 bytes
-print-type-size type: `Enum4<(), char, (), ()>`: 4 bytes, alignment: 4 bytes
-print-type-size     variant `One`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size     variant `Two`: 4 bytes
-print-type-size         field `.0`: 4 bytes
-print-type-size     variant `Three`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size     variant `Four`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size type: `MyOption<char>`: 4 bytes, alignment: 4 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Some`: 4 bytes
-print-type-size         field `.0`: 4 bytes
-print-type-size type: `MyOption<core::nonzero::NonZero<u32>>`: 4 bytes, alignment: 4 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Some`: 4 bytes
-print-type-size         field `.0`: 4 bytes
-print-type-size type: `core::nonzero::NonZero<u32>`: 4 bytes, alignment: 4 bytes
-print-type-size     field `.0`: 4 bytes
-print-type-size type: `Enum4<(), (), (), MyOption<u8>>`: 2 bytes, alignment: 1 bytes
-print-type-size     variant `One`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size     variant `Two`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size     variant `Three`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size     variant `Four`: 2 bytes
-print-type-size         field `.0`: 2 bytes
-print-type-size type: `MyOption<MyOption<u8>>`: 2 bytes, alignment: 1 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Some`: 2 bytes
-print-type-size         field `.0`: 2 bytes
-print-type-size type: `MyOption<u8>`: 2 bytes, alignment: 1 bytes
-print-type-size     discriminant: 1 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Some`: 1 bytes
-print-type-size         field `.0`: 1 bytes
-print-type-size type: `Enum4<(), (), bool, ()>`: 1 bytes, alignment: 1 bytes
-print-type-size     variant `One`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size     variant `Two`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size     variant `Three`: 1 bytes
-print-type-size         field `.0`: 1 bytes
-print-type-size     variant `Four`: 0 bytes
-print-type-size         field `.0`: 0 bytes
-print-type-size type: `MyOption<bool>`: 1 bytes, alignment: 1 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Some`: 1 bytes
-print-type-size         field `.0`: 1 bytes
-print-type-size type: `MyOption<core::cmp::Ordering>`: 1 bytes, alignment: 1 bytes
-print-type-size     variant `None`: 0 bytes
-print-type-size     variant `Some`: 1 bytes
-print-type-size         field `.0`: 1 bytes
-print-type-size type: `core::cmp::Ordering`: 1 bytes, alignment: 1 bytes
-print-type-size     discriminant: 1 bytes
-print-type-size     variant `Less`: 0 bytes
-print-type-size     variant `Equal`: 0 bytes
-print-type-size     variant `Greater`: 0 bytes
diff --git a/src/test/ui/print_type_sizes/niche-filling.rs b/src/test/ui/print_type_sizes/nullable.rs
index f1c419d8895..5052c59a39d 100644
--- a/src/test/ui/print_type_sizes/niche-filling.rs
+++ b/src/test/ui/print_type_sizes/nullable.rs
@@ -10,8 +10,8 @@
 
 // compile-flags: -Z print-type-sizes
 
-// This file illustrates how niche-filling enums are handled,
-// modelled after cases like `Option<&u32>`, `Option<bool>` and such.
+// This file illustrates how enums with a non-null field are handled,
+// modelled after cases like `Option<&u32>` and such.
 //
 // It uses NonZero directly, rather than `&_` or `Unique<_>`, because
 // the test is not set up to deal with target-dependent pointer width.
@@ -68,22 +68,8 @@ impl One for u32 {
     fn one() -> Self { 1 }
 }
 
-pub enum Enum4<A, B, C, D> {
-    One(A),
-    Two(B),
-    Three(C),
-    Four(D)
-}
-
 pub fn main() {
     let _x: MyOption<NonZero<u32>> = Default::default();
     let _y: EmbeddedDiscr = Default::default();
     let _z: MyOption<IndirectNonZero<u32>> = Default::default();
-    let _a: MyOption<bool> = Default::default();
-    let _b: MyOption<char> = Default::default();
-    let _c: MyOption<std::cmp::Ordering> = Default::default();
-    let _b: MyOption<MyOption<u8>> = Default::default();
-    let _e: Enum4<(), char, (), ()> = Enum4::One(());
-    let _f: Enum4<(), (), bool, ()> = Enum4::One(());
-    let _g: Enum4<(), (), (), MyOption<u8>> = Enum4::One(());
 }
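
The rename from `niche-filling` back to `nullable` narrows the test to the classic non-null-field optimization. A stand-alone sketch of the effect, using stable `NonZeroU32` as a stand-in for the `core::nonzero::NonZero<u32>` the test itself uses:

```rust
use std::num::NonZeroU32;

#[allow(dead_code)]
enum MyOption<T> { None, Some(T) }

fn main() {
    // The dataless `None` variant hides in the forbidden zero value,
    // so no separate discriminant is added: 4 bytes total, matching
    // the expected stdout recorded below.
    assert_eq!(
        std::mem::size_of::<MyOption<NonZeroU32>>(),
        std::mem::size_of::<u32>()
    );
}
```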
diff --git a/src/test/ui/print_type_sizes/nullable.stdout b/src/test/ui/print_type_sizes/nullable.stdout
new file mode 100644
index 00000000000..830678f174f
--- /dev/null
+++ b/src/test/ui/print_type_sizes/nullable.stdout
@@ -0,0 +1,24 @@
+print-type-size type: `IndirectNonZero<u32>`: 12 bytes, alignment: 4 bytes
+print-type-size     field `.nested`: 8 bytes
+print-type-size     field `.post`: 2 bytes
+print-type-size     field `.pre`: 1 bytes
+print-type-size     end padding: 1 bytes
+print-type-size type: `MyOption<IndirectNonZero<u32>>`: 12 bytes, alignment: 4 bytes
+print-type-size     variant `Some`: 12 bytes
+print-type-size         field `.0`: 12 bytes
+print-type-size type: `EmbeddedDiscr`: 8 bytes, alignment: 4 bytes
+print-type-size     variant `Record`: 7 bytes
+print-type-size         field `.val`: 4 bytes
+print-type-size         field `.post`: 2 bytes
+print-type-size         field `.pre`: 1 bytes
+print-type-size     end padding: 1 bytes
+print-type-size type: `NestedNonZero<u32>`: 8 bytes, alignment: 4 bytes
+print-type-size     field `.val`: 4 bytes
+print-type-size     field `.post`: 2 bytes
+print-type-size     field `.pre`: 1 bytes
+print-type-size     end padding: 1 bytes
+print-type-size type: `MyOption<core::nonzero::NonZero<u32>>`: 4 bytes, alignment: 4 bytes
+print-type-size     variant `Some`: 4 bytes
+print-type-size         field `.0`: 4 bytes
+print-type-size type: `core::nonzero::NonZero<u32>`: 4 bytes, alignment: 4 bytes
+print-type-size     field `.0`: 4 bytes
diff --git a/src/test/ui/print_type_sizes/uninhabited.stdout b/src/test/ui/print_type_sizes/uninhabited.stdout
deleted file mode 100644
index 2a8706f7ac5..00000000000
--- a/src/test/ui/print_type_sizes/uninhabited.stdout
+++ /dev/null
@@ -1,5 +0,0 @@
-print-type-size type: `std::result::Result<u32, !>`: 4 bytes, alignment: 4 bytes
-print-type-size     variant `Ok`: 4 bytes
-print-type-size         field `.0`: 4 bytes
-print-type-size type: `std::option::Option<!>`: 0 bytes, alignment: 1 bytes
-print-type-size     variant `None`: 0 bytes
diff --git a/src/tools/cargotest/main.rs b/src/tools/cargotest/main.rs
index b1122f401fe..a6c56a13076 100644
--- a/src/tools/cargotest/main.rs
+++ b/src/tools/cargotest/main.rs
@@ -60,8 +60,8 @@ const TEST_REPOS: &'static [Test] = &[
     },
     Test {
         name: "servo",
-        repo: "https://github.com/eddyb/servo",
-        sha: "6031de9a397e2feba4ff98725991825f62b68518",
+        repo: "https://github.com/servo/servo",
+        sha: "38fe9533b93e985657f99a29772bf3d3c8694822",
         lock: None,
         // Only test Stylo a.k.a. Quantum CSS, the parts of Servo going into Firefox.
         // This takes much less time to build than all of Servo and supports stable Rust.
diff --git a/src/tools/toolstate.toml b/src/tools/toolstate.toml
index f1684f4c5ac..744a0f96ad7 100644
--- a/src/tools/toolstate.toml
+++ b/src/tools/toolstate.toml
@@ -26,7 +26,7 @@
 miri = "Broken"
 
 # ping @Manishearth @llogiq @mcarton @oli-obk
-clippy = "Broken"
+clippy = "Testing"
 
 # ping @nrc
 rls = "Testing"