| field | value | date |
|---|---|---|
| author | Eduard-Mihai Burtescu <edy.burt@gmail.com> | 2017-06-01 21:50:53 +0300 |
| committer | Eduard-Mihai Burtescu <edy.burt@gmail.com> | 2017-11-19 02:14:24 +0200 |
| commit | f44b0991879f5e379573d3f2fa1d702c923729f9 (patch) | |
| tree | 93a7b1fb8ab257afc7891b2bbf055bce98610700 | |
| parent | 9deea47c9605f77d3d595744753704bfd74c0dc9 (diff) | |
| download | rust-f44b0991879f5e379573d3f2fa1d702c923729f9.tar.gz, rust-f44b0991879f5e379573d3f2fa1d702c923729f9.zip | |
rustc_trans: avoid working with sizes/offsets and alignments as integers.
38 files changed, 863 insertions, 986 deletions
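The change is easiest to see in miniature. Below is a self-contained sketch of the `Size` pattern this commit builds on in `layout.rs`: typed byte counts with overflow-aware construction and operator overloading, so offsets can no longer be mixed up with plain integers. It is simplified under stated assumptions: a bare power-of-two byte count stands in for the real `Align` type, and only operations that appear in the diff are mirrored.

```rust
use std::ops::{Add, AddAssign};

/// Typed size in bytes, mirroring `ty::layout::Size` from the diff.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct Size {
    raw: u64,
}

impl Size {
    pub fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    }

    /// Round bits up to whole bytes. Written as in the diff to avoid the
    /// potential overflow of the old `(bits + 7) / 8` when `bits` is huge.
    pub fn from_bits(bits: u64) -> Size {
        Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8)
    }

    pub fn bytes(self) -> u64 {
        self.raw
    }

    /// Round up to a power-of-two alignment given in bytes (the real
    /// method takes an `Align`; a bare byte count stands in here).
    pub fn abi_align(self, align: u64) -> Size {
        let mask = align - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    /// Like the diff's `is_abi_aligned`: the size is a multiple of `align`.
    pub fn is_abi_aligned(self, align: u64) -> bool {
        self.bytes() & (align - 1) == 0
    }
}

// Panicking arithmetic for convenience, as the diff comments: fine in
// trans, but layout computation should keep using checked operations.
impl Add for Size {
    type Output = Size;
    fn add(self, other: Size) -> Size {
        // Each Size is bounded by the object-size limit, so this
        // cannot overflow where trans uses it.
        Size::from_bytes(self.bytes() + other.bytes())
    }
}

impl AddAssign for Size {
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

fn main() {
    // The overflow case the new `from_bits` handles:
    assert_eq!(Size::from_bits(u64::MAX).bytes(), u64::MAX / 8 + 1);

    // Offsets accumulate as typed values instead of raw u64s:
    let mut offset = Size::from_bytes(0);
    offset += Size::from_bits(1); // an i1 still occupies a whole byte
    offset = offset.abi_align(4); // pad to a 4-byte boundary
    offset += Size::from_bytes(4);
    assert!(offset.is_abi_aligned(4));
    println!("final offset: {} bytes", offset.bytes());
}
```

Keeping `Add`/`Sub` panicking while steering layout computation toward `checked_add` and `LayoutError` (as the diff's comment says) keeps convenience in trans but leaves overflow handling explicit where it matters.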
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 491fa2a240c..d83f7e661ba 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -24,7 +24,7 @@ use std::fmt; use std::i64; use std::iter; use std::mem; -use std::ops::Deref; +use std::ops::{Deref, Add, Sub, Mul, AddAssign}; use ich::StableHashingContext; use rustc_data_structures::stable_hasher::{HashStable, StableHasher, @@ -203,6 +203,18 @@ impl TargetDataLayout { bits => bug!("ptr_sized_integer: unknown pointer bit size {}", bits) } } + + pub fn vector_align(&self, vec_size: Size) -> Align { + for &(size, align) in &self.vector_align { + if size == vec_size { + return align; + } + } + // Default to natural alignment, which is what LLVM does. + // That is, use the size, rounded up to a power of 2. + let align = vec_size.bytes().next_power_of_two(); + Align::from_bytes(align, align).unwrap() + } } pub trait HasDataLayout: Copy { @@ -236,7 +248,8 @@ pub struct Size { impl Size { pub fn from_bits(bits: u64) -> Size { - Size::from_bytes((bits + 7) / 8) + // Avoid potential overflow from `bits + 7`. + Size::from_bytes(bits / 8 + ((bits % 8) + 7) / 8) } pub fn from_bytes(bytes: u64) -> Size { @@ -261,6 +274,11 @@ impl Size { Size::from_bytes((self.bytes() + mask) & !mask) } + pub fn is_abi_aligned(self, align: Align) -> bool { + let mask = align.abi() - 1; + self.bytes() & mask == 0 + } + pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> { let dl = cx.data_layout(); @@ -278,8 +296,6 @@ impl Size { pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> { let dl = cx.data_layout(); - // Each Size is less than dl.obj_size_bound(), so the sum is - // also less than 1 << 62 (and therefore can't overflow). match self.bytes().checked_mul(count) { Some(bytes) if bytes < dl.obj_size_bound() => { Some(Size::from_bytes(bytes)) @@ -289,6 +305,46 @@ impl Size { } } +// Panicking addition, subtraction and multiplication for convenience. +// Avoid during layout computation, return `LayoutError` instead. + +impl Add for Size { + type Output = Size; + fn add(self, other: Size) -> Size { + // Each Size is less than 1 << 61, so the sum is + // less than 1 << 62 (and therefore can't overflow). + Size::from_bytes(self.bytes() + other.bytes()) + } +} + +impl Sub for Size { + type Output = Size; + fn sub(self, other: Size) -> Size { + // Each Size is less than 1 << 61, so an underflow + // would result in a value larger than 1 << 61, + // which Size::from_bytes will catch for us. + Size::from_bytes(self.bytes() - other.bytes()) + } +} + +impl Mul<u64> for Size { + type Output = Size; + fn mul(self, count: u64) -> Size { + match self.bytes().checked_mul(count) { + Some(bytes) => Size::from_bytes(bytes), + None => { + bug!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count) + } + } + } +} + +impl AddAssign for Size { + fn add_assign(&mut self, other: Size) { + *self = *self + other; + } +} + /// Alignment of a type in bytes, both ABI-mandated and preferred. 
/// Each field is a power of two, giving the alignment a maximum /// value of 2^(2^8 - 1), which is limited by LLVM to a i32, with @@ -301,7 +357,8 @@ pub struct Align { impl Align { pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> { - Align::from_bytes((abi + 7) / 8, (pref + 7) / 8) + Align::from_bytes(Size::from_bits(abi).bytes(), + Size::from_bits(pref).bytes()) } pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> { @@ -340,6 +397,14 @@ impl Align { 1 << self.pref } + pub fn abi_bits(self) -> u64 { + self.abi() * 8 + } + + pub fn pref_bits(self) -> u64 { + self.pref() * 8 + } + pub fn min(self, other: Align) -> Align { Align { abi: cmp::min(self.abi, other.abi), @@ -366,7 +431,7 @@ pub enum Integer { I128, } -impl Integer { +impl<'a, 'tcx> Integer { pub fn size(&self) -> Size { match *self { I1 => Size::from_bits(1), @@ -391,8 +456,7 @@ impl Integer { } } - pub fn to_ty<'a, 'tcx>(&self, tcx: &TyCtxt<'a, 'tcx, 'tcx>, - signed: bool) -> Ty<'tcx> { + pub fn to_ty(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, signed: bool) -> Ty<'tcx> { match (*self, signed) { (I1, false) => tcx.types.u8, (I8, false) => tcx.types.u8, @@ -467,12 +531,12 @@ impl Integer { /// signed discriminant range and #[repr] attribute. /// N.B.: u64 values above i64::MAX will be treated as signed, but /// that shouldn't affect anything, other than maybe debuginfo. - fn repr_discr<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - ty: Ty<'tcx>, - repr: &ReprOptions, - min: i64, - max: i64) - -> (Integer, bool) { + fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>, + ty: Ty<'tcx>, + repr: &ReprOptions, + min: i64, + max: i64) + -> (Integer, bool) { // Theoretically, negative values could be larger in unsigned representation // than the unsigned representation of the signed minimum. However, if there // are any negative values, the only valid unsigned representation is u64 @@ -898,16 +962,6 @@ impl<'a, 'tcx> Struct { } Ok(None) } - - pub fn over_align(&self) -> Option<u32> { - let align = self.align.abi(); - let primitive_align = self.primitive_align.abi(); - if align > primitive_align { - Some(align as u32) - } else { - None - } - } } /// An untagged union. @@ -981,16 +1035,6 @@ impl<'a, 'tcx> Union { pub fn stride(&self) -> Size { self.min_size.abi_align(self.align) } - - pub fn over_align(&self) -> Option<u32> { - let align = self.align.abi(); - let primitive_align = self.primitive_align.abi(); - if align > primitive_align { - Some(align as u32) - } else { - None - } - } } /// The first half of a fat pointer. @@ -1607,9 +1651,8 @@ impl<'a, 'tcx> Layout { FatPointer { metadata, .. } => { // Effectively a (ptr, meta) tuple. - Pointer.size(dl).abi_align(metadata.align(dl)) - .checked_add(metadata.size(dl), dl).unwrap() - .abi_align(self.align(dl)) + (Pointer.size(dl).abi_align(metadata.align(dl)) + + metadata.size(dl)).abi_align(self.align(dl)) } CEnum { discr, .. } => Int(discr).size(dl), @@ -1638,15 +1681,7 @@ impl<'a, 'tcx> Layout { None => bug!("Layout::align({:?}): {} * {} overflowed", self, elem_size.bytes(), count) }; - for &(size, align) in &dl.vector_align { - if size == vec_size { - return align; - } - } - // Default to natural alignment, which is what LLVM does. - // That is, use the size, rounded up to a power of 2. - let align = vec_size.bytes().next_power_of_two(); - Align::from_bytes(align, align).unwrap() + dl.vector_align(vec_size) } FatPointer { metadata, .. 
} => { @@ -1666,7 +1701,7 @@ impl<'a, 'tcx> Layout { } /// Returns alignment before repr alignment is applied - pub fn primitive_align(&self, dl: &TargetDataLayout) -> Align { + pub fn primitive_align<C: HasDataLayout>(&self, cx: C) -> Align { match *self { Array { primitive_align, .. } | General { primitive_align, .. } => primitive_align, Univariant { ref variant, .. } | @@ -1674,18 +1709,7 @@ impl<'a, 'tcx> Layout { variant.primitive_align }, - _ => self.align(dl) - } - } - - /// Returns repr alignment if it is greater than the primitive alignment. - pub fn over_align(&self, dl: &TargetDataLayout) -> Option<u32> { - let align = self.align(dl); - let primitive_align = self.primitive_align(dl); - if align.abi() > primitive_align.abi() { - Some(align.abi() as u32) - } else { - None + _ => self.align(cx.data_layout()) } } diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index 24c3963fbc4..48f8094f98d 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -1205,15 +1205,13 @@ extern "C" { pub fn LLVMRustBuildAtomicLoad(B: BuilderRef, PointerVal: ValueRef, Name: *const c_char, - Order: AtomicOrdering, - Alignment: c_uint) + Order: AtomicOrdering) -> ValueRef; pub fn LLVMRustBuildAtomicStore(B: BuilderRef, Val: ValueRef, Ptr: ValueRef, - Order: AtomicOrdering, - Alignment: c_uint) + Order: AtomicOrdering) -> ValueRef; pub fn LLVMRustBuildAtomicCmpXchg(B: BuilderRef, @@ -1247,23 +1245,6 @@ extern "C" { /// Creates target data from a target layout string. pub fn LLVMCreateTargetData(StringRep: *const c_char) -> TargetDataRef; - /// Number of bytes clobbered when doing a Store to *T. - pub fn LLVMSizeOfTypeInBits(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - - /// Distance between successive elements in an array of T. Includes ABI padding. - pub fn LLVMABISizeOfType(TD: TargetDataRef, Ty: TypeRef) -> c_ulonglong; - - /// Returns the preferred alignment of a type. - pub fn LLVMPreferredAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - /// Returns the minimum alignment of a type. - pub fn LLVMABIAlignmentOfType(TD: TargetDataRef, Ty: TypeRef) -> c_uint; - - /// Computes the byte offset of the indexed struct element for a - /// target. - pub fn LLVMOffsetOfElement(TD: TargetDataRef, - StructTy: TypeRef, - Element: c_uint) - -> c_ulonglong; /// Disposes target data. 
pub fn LLVMDisposeTargetData(TD: TargetDataRef); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 6df40c34ec5..ffbc4f82bca 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -30,17 +30,16 @@ use cabi_sparc64; use cabi_nvptx; use cabi_nvptx64; use cabi_hexagon; -use machine::llalign_of_min; use type_::Type; use type_of; use rustc::hir; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size}; +use rustc::ty::layout::{self, Align, Layout, Size, TyLayout}; +use rustc::ty::layout::{HasDataLayout, LayoutTyper}; use rustc_back::PanicStrategy; use libc::c_uint; -use std::cmp; use std::iter; pub use syntax::abi::Abi; @@ -108,8 +107,8 @@ impl ArgAttributes { self } - pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self { - self.dereferenceable_bytes = bytes; + pub fn set_dereferenceable(&mut self, size: Size) -> &mut Self { + self.dereferenceable_bytes = size.bytes(); self } @@ -174,7 +173,32 @@ impl Reg { } impl Reg { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn align(&self, ccx: &CrateContext) -> Align { + let dl = ccx.data_layout(); + match self.kind { + RegKind::Integer => { + match self.size.bits() { + 1 => dl.i1_align, + 2...8 => dl.i8_align, + 9...16 => dl.i16_align, + 17...32 => dl.i32_align, + 33...64 => dl.i64_align, + 65...128 => dl.i128_align, + _ => bug!("unsupported integer: {:?}", self) + } + } + RegKind::Float => { + match self.size.bits() { + 32 => dl.f32_align, + 64 => dl.f64_align, + _ => bug!("unsupported float: {:?}", self) + } + } + RegKind::Vector => dl.vector_align(self.size) + } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { match self.kind { RegKind::Integer => Type::ix(ccx, self.size.bits()), RegKind::Float => { @@ -193,7 +217,7 @@ impl Reg { /// An argument passed entirely registers with the /// same kind (e.g. HFA / HVA on PPC64 and AArch64). -#[derive(Copy, Clone)] +#[derive(Clone, Copy, Debug)] pub struct Uniform { pub unit: Reg, @@ -216,7 +240,11 @@ impl From<Reg> for Uniform { } impl Uniform { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn align(&self, ccx: &CrateContext) -> Align { + self.unit.align(ccx) + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { let llunit = self.unit.llvm_type(ccx); if self.total <= self.unit.size { @@ -328,11 +356,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } // Keep track of the offset (without padding). - let size = field.size(ccx); - match unaligned_offset.checked_add(size, ccx) { - Some(offset) => unaligned_offset = offset, - None => return None - } + unaligned_offset += field.size(ccx); } // There needs to be no padding. 
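A note on the `Reg::align` method added above: it buckets a register's bit-width into the target's integer alignments rather than querying LLVM (`llalign_of_min` is dropped from the imports). Below is a minimal sketch of the same bucketing, assuming a mocked, illustrative `DataLayout` with typical 64-bit-target values; the real method reads `TargetDataLayout` fields and returns an `Align`, and the match here is abridged (the i128 bucket is omitted).

```rust
/// Register classes used by the ABI code, as in `abi.rs`.
#[derive(Copy, Clone, Debug)]
enum RegKind {
    Integer,
    Float,
    Vector,
}

#[derive(Copy, Clone, Debug)]
struct Reg {
    kind: RegKind,
    size_bits: u64, // the real `Reg` stores a `Size`
}

/// Stand-in for the alignment fields of `TargetDataLayout`; the values
/// chosen in `main` assume a typical 64-bit target.
struct DataLayout {
    i1_align: u64,
    i8_align: u64,
    i16_align: u64,
    i32_align: u64,
    i64_align: u64,
    f32_align: u64,
    f64_align: u64,
}

impl Reg {
    /// Bucket a register's bit-width into the target's alignments,
    /// following (in abridged form) the match in the diff's `Reg::align`.
    fn align(&self, dl: &DataLayout) -> u64 {
        match self.kind {
            RegKind::Integer => match self.size_bits {
                1 => dl.i1_align,
                2..=8 => dl.i8_align,
                9..=16 => dl.i16_align,
                17..=32 => dl.i32_align,
                33..=64 => dl.i64_align,
                _ => panic!("unsupported integer: {:?}", self),
            },
            RegKind::Float => match self.size_bits {
                32 => dl.f32_align,
                64 => dl.f64_align,
                _ => panic!("unsupported float: {:?}", self),
            },
            // The real code defers to `TargetDataLayout::vector_align`,
            // whose fallback is the size rounded up to a power of two.
            RegKind::Vector => self.size_bits.next_power_of_two() / 8,
        }
    }
}

fn main() {
    let dl = DataLayout {
        i1_align: 1, i8_align: 1, i16_align: 2, i32_align: 4,
        i64_align: 8, f32_align: 4, f64_align: 8,
    };
    // A 24-bit integer register aligns like an i32:
    assert_eq!(Reg { kind: RegKind::Integer, size_bits: 24 }.align(&dl), 4);
    // Floats use their own alignments:
    assert_eq!(Reg { kind: RegKind::Float, size_bits: 64 }.align(&dl), 8);
    // A 128-bit vector falls back to natural alignment (16 bytes):
    assert_eq!(Reg { kind: RegKind::Vector, size_bits: 128 }.align(&dl), 16);
    println!("alignment buckets check out");
}
```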
@@ -387,6 +411,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { } } +#[derive(Clone, Copy, Debug)] pub enum CastTarget { Uniform(Uniform), Pair(Reg, Reg) @@ -405,7 +430,28 @@ impl From<Uniform> for CastTarget { } impl CastTarget { - fn llvm_type(&self, ccx: &CrateContext) -> Type { + pub fn size(&self, ccx: &CrateContext) -> Size { + match *self { + CastTarget::Uniform(u) => u.total, + CastTarget::Pair(a, b) => { + (a.size.abi_align(a.align(ccx)) + b.size) + .abi_align(self.align(ccx)) + } + } + } + + pub fn align(&self, ccx: &CrateContext) -> Align { + match *self { + CastTarget::Uniform(u) => u.align(ccx), + CastTarget::Pair(a, b) => { + ccx.data_layout().aggregate_align + .max(a.align(ccx)) + .max(b.align(ccx)) + } + } + } + + pub fn llvm_type(&self, ccx: &CrateContext) -> Type { match *self { CastTarget::Uniform(u) => u.llvm_type(ccx), CastTarget::Pair(a, b) => { @@ -426,11 +472,11 @@ impl CastTarget { pub struct ArgType<'tcx> { kind: ArgKind, pub layout: TyLayout<'tcx>, - /// Coerced LLVM Type - pub cast: Option<Type>, - /// Dummy argument, which is emitted before the real argument - pub pad: Option<Type>, - /// LLVM attributes of argument + /// Cast target, either a single uniform or a pair of registers. + pub cast: Option<CastTarget>, + /// Dummy argument, which is emitted before the real argument. + pub pad: Option<Reg>, + /// Attributes of argument. pub attrs: ArgAttributes } @@ -451,14 +497,12 @@ impl<'a, 'tcx> ArgType<'tcx> { // Wipe old attributes, likely not valid through indirection. self.attrs = ArgAttributes::default(); - let llarg_sz = self.layout.size(ccx).bytes(); - // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. It's also // program-invisible so can't possibly capture self.attrs.set(ArgAttribute::NoAlias) .set(ArgAttribute::NoCapture) - .set_dereferenceable(llarg_sz); + .set_dereferenceable(self.layout.size(ccx)); self.kind = ArgKind::Indirect; } @@ -500,12 +544,12 @@ impl<'a, 'tcx> ArgType<'tcx> { } } - pub fn cast_to<T: Into<CastTarget>>(&mut self, ccx: &CrateContext, target: T) { - self.cast = Some(target.into().llvm_type(ccx)); + pub fn cast_to<T: Into<CastTarget>>(&mut self, target: T) { + self.cast = Some(target.into()); } - pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) { - self.pad = Some(reg.llvm_type(ccx)); + pub fn pad_with(&mut self, reg: Reg) { + self.pad = Some(reg); } pub fn is_indirect(&self) -> bool { @@ -533,16 +577,14 @@ impl<'a, 'tcx> ArgType<'tcx> { let ccx = bcx.ccx; if self.is_indirect() { let llsz = C_usize(ccx, self.layout.size(ccx).bytes()); - let llalign = self.layout.align(ccx).abi(); - base::call_memcpy(bcx, dst, val, llsz, llalign as u32); + base::call_memcpy(bcx, dst, val, llsz, self.layout.align(ccx)); } else if let Some(ty) = self.cast { // FIXME(eddyb): Figure out when the simpler Store is safe, clang // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bcx.pointercast(dst, ty.ptr_to()); - let llalign = self.layout.align(ccx).abi(); - bcx.store(val, cast_dst, Some(llalign as u32)); + let cast_dst = bcx.pointercast(dst, ty.llvm_type(ccx).ptr_to()); + bcx.store(val, cast_dst, Some(self.layout.align(ccx))); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -559,8 +601,9 @@ impl<'a, 'tcx> ArgType<'tcx> { // bitcasting to the struct type yields invalid cast errors. 
// We instead thus allocate some scratch space... - let llscratch = bcx.alloca(ty, "abi_cast", None); - base::Lifetime::Start.call(bcx, llscratch); + let llscratch = bcx.alloca(ty.llvm_type(ccx), "abi_cast", None); + let scratch_size = ty.size(ccx); + bcx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... bcx.store(val, llscratch, None); @@ -570,10 +613,9 @@ impl<'a, 'tcx> ArgType<'tcx> { bcx.pointercast(dst, Type::i8p(ccx)), bcx.pointercast(llscratch, Type::i8p(ccx)), C_usize(ccx, self.layout.size(ccx).bytes()), - cmp::min(self.layout.align(ccx).abi() as u32, - llalign_of_min(ccx, ty))); + self.layout.align(ccx).min(ty.align(ccx))); - base::Lifetime::End.call(bcx, llscratch); + bcx.lifetime_end(llscratch, scratch_size); } } else { if self.layout.ty == ccx.tcx().types.bool { @@ -840,7 +882,7 @@ impl<'a, 'tcx> FnType<'tcx> { // Replace newtypes with their inner-most type. if unit.size == size { // Needs a cast as we've unpacked a newtype. - arg.cast_to(ccx, unit); + arg.cast_to(unit); return; } @@ -850,7 +892,7 @@ impl<'a, 'tcx> FnType<'tcx> { // FIXME(eddyb) This should be using Uniform instead of a pair, // but the resulting [2 x float/double] breaks emscripten. // See https://github.com/kripken/emscripten-fastcomp/issues/178. - arg.cast_to(ccx, CastTarget::Pair(unit, unit)); + arg.cast_to(CastTarget::Pair(unit, unit)); return; } } @@ -862,7 +904,7 @@ impl<'a, 'tcx> FnType<'tcx> { // We want to pass small aggregates as immediates, but using // a LLVM aggregate type for this leads to bad optimizations, // so we pick an appropriately sized integer type instead. - arg.cast_to(ccx, Reg { + arg.cast_to(Reg { kind: RegKind::Integer, size }); @@ -931,10 +973,10 @@ impl<'a, 'tcx> FnType<'tcx> { } else if self.ret.is_indirect() { llargument_tys.push(self.ret.memory_ty(ccx).ptr_to()); Type::void(ccx) + } else if let Some(cast) = self.ret.cast { + cast.llvm_type(ccx) } else { - self.ret.cast.unwrap_or_else(|| { - type_of::immediate_type_of(ccx, self.ret.layout.ty) - }) + type_of::immediate_type_of(ccx, self.ret.layout.ty) }; for arg in &self.args { @@ -943,15 +985,15 @@ impl<'a, 'tcx> FnType<'tcx> { } // add padding if let Some(ty) = arg.pad { - llargument_tys.push(ty); + llargument_tys.push(ty.llvm_type(ccx)); } let llarg_ty = if arg.is_indirect() { arg.memory_ty(ccx).ptr_to() + } else if let Some(cast) = arg.cast { + cast.llvm_type(ccx) } else { - arg.cast.unwrap_or_else(|| { - type_of::immediate_type_of(ccx, arg.layout.ty) - }) + type_of::immediate_type_of(ccx, arg.layout.ty) }; llargument_tys.push(llarg_ty); @@ -998,7 +1040,3 @@ impl<'a, 'tcx> FnType<'tcx> { } } } - -pub fn align_up_to(off: u64, a: u64) -> u64 { - (off + a - 1) / a * a -} diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs index cdf66a0835d..b5b90753553 100644 --- a/src/librustc_trans/adt.rs +++ b/src/librustc_trans/adt.rs @@ -42,10 +42,9 @@ //! taken to it, implementing them for Rust seems difficult. use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, Align, HasDataLayout, LayoutTyper, Size}; use context::CrateContext; -use machine; use monomorphize; use type_::Type; use type_of; @@ -134,9 +133,7 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } layout::UntaggedUnion { ref variants, .. }=> { // Use alignment-sized ints to fill all the union storage. 
- let size = variants.stride().bytes(); - let align = variants.align.abi(); - let fill = union_fill(cx, size, align); + let fill = union_fill(cx, variants.stride(), variants.align); match name { None => { Type::struct_(cx, &[fill], variants.packed) @@ -159,22 +156,18 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, // So we start with the discriminant, pad it up to the alignment with // more of its own type, then use alignment-sized ints to get the rest // of the size. - let size = size.bytes(); - let align = align.abi(); - let primitive_align = primitive_align.abi(); - assert!(align <= ::std::u32::MAX as u64); let discr_ty = Type::from_integer(cx, discr); let discr_size = discr.size().bytes(); - let padded_discr_size = roundup(discr_size, align as u32); - let variant_part_size = size-padded_discr_size; - let variant_fill = union_fill(cx, variant_part_size, primitive_align); + let padded_discr_size = discr.size().abi_align(align); + let variant_part_size = size - padded_discr_size; - assert_eq!(machine::llalign_of_min(cx, variant_fill), primitive_align as u32); - assert_eq!(padded_discr_size % discr_size, 0); // Ensure discr_ty can fill pad evenly - let fields: Vec<Type> = - [discr_ty, - Type::array(&discr_ty, (padded_discr_size - discr_size)/discr_size), - variant_fill].iter().cloned().collect(); + // Ensure discr_ty can fill pad evenly + assert_eq!(padded_discr_size.bytes() % discr_size, 0); + let fields = [ + discr_ty, + Type::array(&discr_ty, padded_discr_size.bytes() / discr_size - 1), + union_fill(cx, variant_part_size, primitive_align) + ]; match name { None => { Type::struct_(cx, &fields, false) @@ -190,17 +183,19 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type { - assert_eq!(size%align, 0); - assert_eq!(align.count_ones(), 1, "Alignment must be a power fof 2. 
Got {}", align); - let align_units = size/align; - let layout_align = layout::Align::from_bytes(align, align).unwrap(); - if let Some(ity) = layout::Integer::for_abi_align(cx, layout_align) { - Type::array(&Type::from_integer(cx, ity), align_units) +fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type { + let abi_align = align.abi(); + let elem_ty = if let Some(ity) = layout::Integer::for_abi_align(cx, align) { + Type::from_integer(cx, ity) } else { - Type::array(&Type::vector(&Type::i32(cx), align/4), - align_units) - } + let vec_align = cx.data_layout().vector_align(Size::from_bytes(abi_align)); + assert_eq!(vec_align.abi(), abi_align); + Type::vector(&Type::i32(cx), abi_align / 4) + }; + + let size = size.bytes(); + assert_eq!(size % abi_align, 0); + Type::array(&elem_ty, size / abi_align) } // Lookup `Struct::memory_index` and double it to account for padding @@ -231,7 +226,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }; debug!("struct_llfields: variant: {:?}", variant); let mut first_field = true; - let mut min_offset = 0; + let mut offset = Size::from_bytes(0); let mut result: Vec<Type> = Vec::with_capacity(field_count * 2); let field_iter = variant.field_index_by_increasing_offset().map(|i| { (i, match t.sty { @@ -249,48 +244,47 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, cx.tcx().normalize_associated_type(&ty) }, _ => bug!() - }, variant.offsets[i as usize].bytes()) + }, variant.offsets[i as usize]) }); for (index, ty, target_offset) in field_iter { - assert!(target_offset >= min_offset); - let padding_bytes = target_offset - min_offset; + debug!("struct_llfields: {} ty: {} offset: {:?} target_offset: {:?}", + index, ty, offset, target_offset); + assert!(target_offset >= offset); + let padding = target_offset - offset; if first_field { - debug!("struct_llfields: {} ty: {} min_offset: {} target_offset: {}", - index, ty, min_offset, target_offset); - assert_eq!(padding_bytes, 0); + assert_eq!(padding.bytes(), 0); first_field = false; } else { - result.push(Type::array(&Type::i8(cx), padding_bytes)); - debug!("struct_llfields: {} ty: {} pad_bytes: {} min_offset: {} target_offset: {}", - index, ty, padding_bytes, min_offset, target_offset); + result.push(Type::array(&Type::i8(cx), padding.bytes())); + debug!(" padding before: {:?}", padding); } let llty = type_of::in_memory_type_of(cx, ty); result.push(llty); let layout = cx.layout_of(ty); if variant.packed { - assert_eq!(padding_bytes, 0); + assert_eq!(padding.bytes(), 0); } else { let field_align = layout.align(cx); assert!(field_align.abi() <= variant.align.abi(), "non-packed type has field with larger align ({}): {:#?}", field_align.abi(), variant); } - let target_size = layout.size(&cx.tcx().data_layout).bytes(); - min_offset = target_offset + target_size; + let target_size = layout.size(&cx.tcx().data_layout); + offset = target_offset + target_size; } if variant.sized && field_count > 0 { - if variant.stride().bytes() < min_offset { - bug!("variant: {:?} stride: {} min_offset: {}", variant, variant.stride().bytes(), - min_offset); + if offset > variant.stride() { + bug!("variant: {:?} stride: {:?} offset: {:?}", + variant, variant.stride(), offset); } - let padding_bytes = variant.stride().bytes() - min_offset; - debug!("struct_llfields: pad_bytes: {} min_offset: {} min_size: {} stride: {}\n", - padding_bytes, min_offset, variant.min_size.bytes(), variant.stride().bytes()); - result.push(Type::array(&Type::i8(cx), padding_bytes)); + let padding = variant.stride() - offset; 
+ debug!("struct_llfields: pad_bytes: {:?} offset: {:?} min_size: {:?} stride: {:?}", + padding, offset, variant.min_size, variant.stride()); + result.push(Type::array(&Type::i8(cx), padding.bytes())); assert!(result.len() == (field_count * 2)); } else { - debug!("struct_llfields: min_offset: {} min_size: {} stride: {}\n", - min_offset, variant.min_size.bytes(), variant.stride().bytes()); + debug!("struct_llfields: offset: {:?} min_size: {:?} stride: {:?}", + offset, variant.min_size, variant.stride()); } result @@ -310,7 +304,3 @@ pub fn assert_discr_in_range<D: PartialOrd>(min: D, max: D, discr: D) { assert!(min <= discr || discr <= max) } } - -// FIXME this utility routine should be somewhere more general -#[inline] -fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs index 69bcd0aa50b..98ad6a54bd1 100644 --- a/src/librustc_trans/base.rs +++ b/src/librustc_trans/base.rs @@ -40,6 +40,7 @@ use rustc::middle::lang_items::StartFnLangItem; use rustc::middle::trans::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes}; use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::Align; use rustc::ty::maps::Providers; use rustc::dep_graph::{DepNode, DepKind, DepConstructor}; use rustc::middle::cstore::{self, LinkMeta, LinkagePreference}; @@ -55,7 +56,7 @@ use builder::Builder; use callee; use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; use collector::{self, TransItemCollectionMode}; -use common::{C_struct_in_context, C_u64, C_undef, C_array}; +use common::{C_struct_in_context, C_undef, C_array}; use common::CrateContext; use common::{type_is_zero_size, val_ty}; use common; @@ -63,7 +64,6 @@ use consts; use context::{self, LocalCrateContext, SharedCrateContext}; use debuginfo; use declare; -use machine; use meth; use mir; use monomorphize::{self, Instance}; @@ -489,42 +489,11 @@ pub fn to_immediate(bcx: &Builder, val: ValueRef, ty: Ty) -> ValueRef { } } -pub enum Lifetime { Start, End } - -impl Lifetime { - // If LLVM lifetime intrinsic support is enabled (i.e. optimizations - // on), and `ptr` is nonzero-sized, then extracts the size of `ptr` - // and the intrinsic for `lt` and passes them to `emit`, which is in - // charge of generating code to call the passed intrinsic on whatever - // block of generated code is targeted for the intrinsic. - // - // If LLVM lifetime intrinsic support is disabled (i.e. optimizations - // off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
- pub fn call(self, b: &Builder, ptr: ValueRef) { - if b.ccx.sess().opts.optimize == config::OptLevel::No { - return; - } - - let size = machine::llsize_of_alloc(b.ccx, val_ty(ptr).element_type()); - if size == 0 { - return; - } - - let lifetime_intrinsic = b.ccx.get_intrinsic(match self { - Lifetime::Start => "llvm.lifetime.start", - Lifetime::End => "llvm.lifetime.end" - }); - - let ptr = b.pointercast(ptr, Type::i8p(b.ccx)); - b.call(lifetime_intrinsic, &[C_u64(b.ccx, size), ptr], None); - } -} - -pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, - dst: ValueRef, - src: ValueRef, - n_bytes: ValueRef, - align: u32) { +pub fn call_memcpy(b: &Builder, + dst: ValueRef, + src: ValueRef, + n_bytes: ValueRef, + align: Align) { let ccx = b.ccx; let ptr_width = &ccx.sess().target.target.target_pointer_width; let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); @@ -532,7 +501,7 @@ pub fn call_memcpy<'a, 'tcx>(b: &Builder<'a, 'tcx>, let src_ptr = b.pointercast(src, Type::i8p(ccx)); let dst_ptr = b.pointercast(dst, Type::i8p(ccx)); let size = b.intcast(n_bytes, ccx.isize_ty(), false); - let align = C_i32(ccx, align as i32); + let align = C_i32(ccx, align.abi() as i32); let volatile = C_bool(ccx, false); b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); } @@ -542,11 +511,11 @@ pub fn memcpy_ty<'a, 'tcx>( dst: ValueRef, src: ValueRef, t: Ty<'tcx>, - align: Option<u32>, + align: Option<Align>, ) { let ccx = bcx.ccx; - let size = ccx.size_of(t); + let size = ccx.size_of(t).bytes(); if size == 0 { return; } diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs index b366d5579c3..c8d8984122f 100644 --- a/src/librustc_trans/builder.rs +++ b/src/librustc_trans/builder.rs @@ -15,12 +15,12 @@ use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; use common::*; -use machine::llalign_of_pref; use type_::Type; use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; -use rustc::session::Session; +use rustc::ty::layout::{Align, Size}; +use rustc::session::{config, Session}; use std::borrow::Cow; use std::ffi::CString; @@ -487,7 +487,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef { + pub fn alloca(&self, ty: Type, name: &str, align: Option<Align>) -> ValueRef { let builder = Builder::with_ccx(self.ccx); builder.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) @@ -495,7 +495,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { builder.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option<u32>) -> ValueRef { + pub fn dynamic_alloca(&self, ty: Type, name: &str, align: Option<Align>) -> ValueRef { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -506,7 +506,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { name.as_ptr()) }; if let Some(align) = align { - llvm::LLVMSetAlignment(alloca, align as c_uint); + llvm::LLVMSetAlignment(alloca, align.abi() as c_uint); } alloca } @@ -519,12 +519,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn load(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef { + pub fn load(&self, ptr: ValueRef, align: Option<Align>) -> ValueRef { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); if let Some(align) = align { - llvm::LLVMSetAlignment(load, align as c_uint); + 
llvm::LLVMSetAlignment(load, align.abi() as c_uint); } load } @@ -539,20 +539,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering) -> ValueRef { + pub fn atomic_load(&self, ptr: ValueRef, order: AtomicOrdering, align: Align) -> ValueRef { self.count_insn("load.atomic"); unsafe { - let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); - let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order, - align as c_uint) + let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); + llvm::LLVMSetAlignment(load, align.abi() as c_uint); + load } } pub fn load_range_assert(&self, ptr: ValueRef, lo: u64, hi: u64, signed: llvm::Bool, - align: Option<u32>) -> ValueRef { + align: Option<Align>) -> ValueRef { let value = self.load(ptr, align); unsafe { @@ -571,7 +570,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { value } - pub fn load_nonnull(&self, ptr: ValueRef, align: Option<u32>) -> ValueRef { + pub fn load_nonnull(&self, ptr: ValueRef, align: Option<Align>) -> ValueRef { let value = self.load(ptr, align); unsafe { llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint, @@ -581,7 +580,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { value } - pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<u32>) -> ValueRef { + pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<Align>) -> ValueRef { debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); assert!(!self.llbuilder.is_null()); self.count_insn("store"); @@ -589,7 +588,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { unsafe { let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); if let Some(align) = align { - llvm::LLVMSetAlignment(store, align as c_uint); + llvm::LLVMSetAlignment(store, align.abi() as c_uint); } store } @@ -607,14 +606,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } } - pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { + pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, + order: AtomicOrdering, align: Align) { debug!("Store {:?} -> {:?}", Value(val), Value(ptr)); self.count_insn("store.atomic"); let ptr = self.check_store(val, ptr); unsafe { - let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); - let align = llalign_of_pref(self.ccx, ty.element_type()); - llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order, align as c_uint); + let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); + llvm::LLVMSetAlignment(store, align.abi() as c_uint); } } @@ -1233,4 +1232,36 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { return Cow::Owned(casted_args); } + + pub fn lifetime_start(&self, ptr: ValueRef, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); + } + + pub fn lifetime_end(&self, ptr: ValueRef, size: Size) { + self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); + } + + /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations + /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` + /// and the intrinsic for `lt` and passes them to `emit`, which is in + /// charge of generating code to call the passed intrinsic on whatever + /// block of generated code is targetted for the intrinsic. + /// + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations + /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
+ fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: ValueRef, size: Size) { + if self.ccx.sess().opts.optimize == config::OptLevel::No { + return; + } + + let size = size.bytes(); + if size == 0 { + return; + } + + let lifetime_intrinsic = self.ccx.get_intrinsic(intrinsic); + + let ptr = self.pointercast(ptr, Type::i8p(self.ccx)); + self.call(lifetime_intrinsic, &[C_u64(self.ccx, size), ptr], None); + } } diff --git a/src/librustc_trans/cabi_aarch64.rs b/src/librustc_trans/cabi_aarch64.rs index bf842e6358f..b021a060725 100644 --- a/src/librustc_trans/cabi_aarch64.rs +++ b/src/librustc_trans/cabi_aarch64.rs @@ -44,7 +44,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc return; } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } let size = ret.layout.size(ccx); @@ -60,7 +60,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -75,7 +75,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc return; } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } let size = arg.layout.size(ccx); @@ -91,7 +91,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc Reg::i64() }; - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit, total: size }); diff --git a/src/librustc_trans/cabi_arm.rs b/src/librustc_trans/cabi_arm.rs index 635741b4d1a..370a950617a 100644 --- a/src/librustc_trans/cabi_arm.rs +++ b/src/librustc_trans/cabi_arm.rs @@ -47,7 +47,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc if vfp { if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } } @@ -62,7 +62,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } else { Reg::i32() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -79,14 +79,14 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc if vfp { if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } } let align = arg.layout.align(ccx).abi(); let total = arg.layout.size(ccx); - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, total }); diff --git a/src/librustc_trans/cabi_asmjs.rs b/src/librustc_trans/cabi_asmjs.rs index 6fcd3ed581d..047caa431c5 100644 --- a/src/librustc_trans/cabi_asmjs.rs +++ b/src/librustc_trans/cabi_asmjs.rs @@ -21,7 +21,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc if let Some(unit) = ret.layout.homogeneous_aggregate(ccx) { let size = ret.layout.size(ccx); if unit.size == size { - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); diff --git a/src/librustc_trans/cabi_mips.rs b/src/librustc_trans/cabi_mips.rs index b7b60859d4a..baab7036741 100644 --- a/src/librustc_trans/cabi_mips.rs +++ b/src/librustc_trans/cabi_mips.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_mips64.rs b/src/librustc_trans/cabi_mips64.rs index dff75e628de..1cb63e72fb9 100644 --- a/src/librustc_trans/cabi_mips64.rs +++ b/src/librustc_trans/cabi_mips64.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(64); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i64(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i64()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i64()); } } else { arg.extend_integer_width_to(64); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 8 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_powerpc.rs b/src/librustc_trans/cabi_powerpc.rs index f951ac76391..df320fb00ab 100644 --- a/src/librustc_trans/cabi_powerpc.rs +++ b/src/librustc_trans/cabi_powerpc.rs @@ -8,46 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
-use abi::{align_up_to, FnType, ArgType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -use std::cmp; +use rustc::ty::layout::Size; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_powerpc64.rs b/src/librustc_trans/cabi_powerpc64.rs index fb5472eb6ae..9a9d6f8d0ac 100644 --- a/src/librustc_trans/cabi_powerpc64.rs +++ b/src/librustc_trans/cabi_powerpc64.rs @@ -67,7 +67,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret, abi) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } @@ -84,7 +84,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -101,7 +101,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg, abi) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } @@ -124,7 +124,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc }, }; - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit, total }); diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs index fedebea3f4c..ffe2940a028 100644 --- a/src/librustc_trans/cabi_s390x.rs +++ b/src/librustc_trans/cabi_s390x.rs @@ -49,16 +49,16 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc if is_single_fp_element(ccx, arg.layout) { match size.bytes() { - 4 => arg.cast_to(ccx, Reg::f32()), - 8 => arg.cast_to(ccx, Reg::f64()), + 4 => arg.cast_to(Reg::f32()), + 8 => arg.cast_to(Reg::f64()), _ => arg.make_indirect(ccx) } } else { match size.bytes() { - 1 => arg.cast_to(ccx, Reg::i8()), - 2 => arg.cast_to(ccx, Reg::i16()), - 4 => arg.cast_to(ccx, Reg::i32()), - 8 => arg.cast_to(ccx, Reg::i64()), + 1 => 
arg.cast_to(Reg::i8()), + 2 => arg.cast_to(Reg::i16()), + 4 => arg.cast_to(Reg::i32()), + 8 => arg.cast_to(Reg::i64()), _ => arg.make_indirect(ccx) } } diff --git a/src/librustc_trans/cabi_sparc.rs b/src/librustc_trans/cabi_sparc.rs index c17901e1ade..baab7036741 100644 --- a/src/librustc_trans/cabi_sparc.rs +++ b/src/librustc_trans/cabi_sparc.rs @@ -8,45 +8,48 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use std::cmp; -use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform}; +use abi::{ArgType, FnType, LayoutExt, Reg, Uniform}; use context::CrateContext; -fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) { +use rustc::ty::layout::Size; + +fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + ret: &mut ArgType<'tcx>, + offset: &mut Size) { if !ret.layout.is_aggregate() { ret.extend_integer_width_to(32); } else { ret.make_indirect(ccx); + *offset += ccx.tcx().data_layout.pointer_size; } } -fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) { +fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) { + let dl = &ccx.tcx().data_layout; let size = arg.layout.size(ccx); - let mut align = arg.layout.align(ccx).abi(); - align = cmp::min(cmp::max(align, 4), 8); + let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i32(), total: size }); - if ((align - 1) & *offset) > 0 { - arg.pad_with(ccx, Reg::i32()); + if !offset.is_abi_aligned(align) { + arg.pad_with(Reg::i32()); } } else { - arg.extend_integer_width_to(32) + arg.extend_integer_width_to(32); } - *offset = align_up_to(*offset, align); - *offset += align_up_to(size.bytes(), align); + *offset = offset.abi_align(align) + size.abi_align(align); } pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) { + let mut offset = Size::from_bytes(0); if !fty.ret.is_ignore() { - classify_ret_ty(ccx, &mut fty.ret); + classify_ret_ty(ccx, &mut fty.ret, &mut offset); } - let mut offset = if fty.ret.is_indirect() { 4 } else { 0 }; for arg in &mut fty.args { if arg.is_ignore() { continue; } classify_arg_ty(ccx, arg, &mut offset); diff --git a/src/librustc_trans/cabi_sparc64.rs b/src/librustc_trans/cabi_sparc64.rs index 8383007550e..788fba9dc26 100644 --- a/src/librustc_trans/cabi_sparc64.rs +++ b/src/librustc_trans/cabi_sparc64.rs @@ -47,7 +47,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, ret) { - ret.cast_to(ccx, uniform); + ret.cast_to(uniform); return; } let size = ret.layout.size(ccx); @@ -63,7 +63,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc Reg::i64() }; - ret.cast_to(ccx, Uniform { + ret.cast_to(Uniform { unit, total: size }); @@ -81,12 +81,12 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc } if let Some(uniform) = is_homogeneous_aggregate(ccx, arg) { - arg.cast_to(ccx, uniform); + arg.cast_to(uniform); return; } let total = arg.layout.size(ccx); - arg.cast_to(ccx, Uniform { + arg.cast_to(Uniform { unit: Reg::i64(), total }); diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs index 49634d6e78c..b34337ae5f6 100644 --- a/src/librustc_trans/cabi_x86.rs +++ b/src/librustc_trans/cabi_x86.rs @@ -56,16 +56,16 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 
'tcx>, // float aggregates directly in a floating-point register. if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) { match size.bytes() { - 4 => fty.ret.cast_to(ccx, Reg::f32()), - 8 => fty.ret.cast_to(ccx, Reg::f64()), + 4 => fty.ret.cast_to(Reg::f32()), + 8 => fty.ret.cast_to(Reg::f64()), _ => fty.ret.make_indirect(ccx) } } else { match size.bytes() { - 1 => fty.ret.cast_to(ccx, Reg::i8()), - 2 => fty.ret.cast_to(ccx, Reg::i16()), - 4 => fty.ret.cast_to(ccx, Reg::i32()), - 8 => fty.ret.cast_to(ccx, Reg::i64()), + 1 => fty.ret.cast_to(Reg::i8()), + 2 => fty.ret.cast_to(Reg::i16()), + 4 => fty.ret.cast_to(Reg::i32()), + 8 => fty.ret.cast_to(Reg::i64()), _ => fty.ret.make_indirect(ccx) } } diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index a814f458e12..6670d084d6c 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -34,9 +34,9 @@ const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64; fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) -> Result<[Class; MAX_EIGHTBYTES], Memory> { fn unify(cls: &mut [Class], - off: u64, + off: Size, c: Class) { - let i = (off / 8) as usize; + let i = (off.bytes() / 8) as usize; let to_write = match (cls[i], c) { (Class::None, _) => c, (_, Class::None) => return, @@ -55,9 +55,9 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>, cls: &mut [Class], - off: u64) + off: Size) -> Result<(), Memory> { - if off % layout.align(ccx).abi() != 0 { + if !off.is_abi_aligned(layout.align(ccx)) { if layout.size(ccx).bytes() > 0 { return Err(Memory); } @@ -85,25 +85,25 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) // everything after the first one is the upper // half of a register. - let eltsz = element.size(ccx).bytes(); + let eltsz = element.size(ccx); for i in 1..count { - unify(cls, off + i * eltsz, Class::SseUp); + unify(cls, off + eltsz * i, Class::SseUp); } } Layout::Array { count, .. } => { if count > 0 { let elt = layout.field(ccx, 0); - let eltsz = elt.size(ccx).bytes(); + let eltsz = elt.size(ccx); for i in 0..count { - classify(ccx, elt, cls, off + i * eltsz)?; + classify(ccx, elt, cls, off + eltsz * i)?; } } } Layout::Univariant { ref variant, .. 
} => { for i in 0..layout.field_count() { - let field_off = off + variant.offsets[i].bytes(); + let field_off = off + variant.offsets[i]; classify(ccx, layout.field(ccx, i), cls, field_off)?; } } @@ -128,7 +128,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } let mut cls = [Class::None; MAX_EIGHTBYTES]; - classify(ccx, arg.layout, &mut cls, 0)?; + classify(ccx, arg.layout, &mut cls, Size::from_bytes(0))?; if n > 2 { if cls[0] != Class::Sse { return Err(Memory); @@ -153,7 +153,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) Ok(cls) } -fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option<Reg> { +fn reg_component(cls: &[Class], i: &mut usize, size: Size) -> Option<Reg> { if *i >= cls.len() { return None; } @@ -162,7 +162,7 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option<Reg> { Class::None => None, Class::Int => { *i += 1; - Some(match size { + Some(match size.bytes() { 1 => Reg::i8(), 2 => Reg::i16(), 3 | @@ -174,14 +174,14 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option<Reg> { let vec_len = 1 + cls[*i+1..].iter().take_while(|&&c| c == Class::SseUp).count(); *i += vec_len; Some(if vec_len == 1 { - match size { + match size.bytes() { 4 => Reg::f32(), _ => Reg::f64() } } else { Reg { kind: RegKind::Vector, - size: Size::from_bytes(vec_len as u64 * 8) + size: Size::from_bytes(8) * (vec_len as u64) } }) } @@ -189,17 +189,17 @@ fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option<Reg> { } } -fn cast_target(cls: &[Class], size: u64) -> CastTarget { +fn cast_target(cls: &[Class], size: Size) -> CastTarget { let mut i = 0; let lo = reg_component(cls, &mut i, size).unwrap(); - let offset = i as u64 * 8; + let offset = Size::from_bytes(8) * (i as u64); let target = if size <= offset { CastTarget::from(lo) } else { let hi = reg_component(cls, &mut i, size - offset).unwrap(); CastTarget::Pair(lo, hi) }; - assert_eq!(reg_component(cls, &mut i, 0), None); + assert_eq!(reg_component(cls, &mut i, Size::from_bytes(0)), None); target } @@ -242,8 +242,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType sse_regs -= needed_sse; if arg.layout.is_aggregate() { - let size = arg.layout.size(ccx).bytes(); - arg.cast_to(ccx, cast_target(cls.as_ref().unwrap(), size)) + let size = arg.layout.size(ccx); + arg.cast_to(cast_target(cls.as_ref().unwrap(), size)) } else { arg.extend_integer_width_to(32); } diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index 39e728d4e4f..1d391da5993 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -20,10 +20,10 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType let size = a.layout.size(ccx); if a.layout.is_aggregate() { match size.bits() { - 8 => a.cast_to(ccx, Reg::i8()), - 16 => a.cast_to(ccx, Reg::i16()), - 32 => a.cast_to(ccx, Reg::i32()), - 64 => a.cast_to(ccx, Reg::i64()), + 8 => a.cast_to(Reg::i8()), + 16 => a.cast_to(Reg::i16()), + 32 => a.cast_to(Reg::i32()), + 64 => a.cast_to(Reg::i64()), _ => a.make_indirect(ccx) }; } else { diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index e3856cabcf9..e3ee8f7c75a 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -22,13 +22,12 @@ use base; use builder::Builder; use consts; use declare; -use machine; use monomorphize; use type_::Type; use value::Value; use rustc::traits; use rustc::ty::{self, Ty, 
TyCtxt}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty::layout::{HasDataLayout, Layout, LayoutTyper}; use rustc::ty::subst::{Kind, Subst, Substs}; use rustc::hir; @@ -252,10 +251,6 @@ pub fn C_big_integral(t: Type, u: u128) -> ValueRef { } } -pub fn C_nil(ccx: &CrateContext) -> ValueRef { - C_struct(ccx, &[], false) -} - pub fn C_bool(ccx: &CrateContext, val: bool) -> ValueRef { C_uint(Type::i1(ccx), val as u64) } @@ -273,8 +268,7 @@ pub fn C_u64(ccx: &CrateContext, i: u64) -> ValueRef { } pub fn C_usize(ccx: &CrateContext, i: u64) -> ValueRef { - let bit_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); - + let bit_size = ccx.data_layout().pointer_size.bits(); if bit_size < 64 { // make sure it doesn't overflow assert!(i < (1<<bit_size)); diff --git a/src/librustc_trans/consts.rs b/src/librustc_trans/consts.rs index 4ae289cfada..83ecbbed76b 100644 --- a/src/librustc_trans/consts.rs +++ b/src/librustc_trans/consts.rs @@ -14,7 +14,7 @@ use llvm::{ValueRef, True}; use rustc::hir::def_id::DefId; use rustc::hir::map as hir_map; use rustc::middle::const_val::ConstEvalErr; -use {debuginfo, machine}; +use debuginfo; use base; use trans_item::{TransItem, TransItemExt}; use common::{self, CrateContext, val_ty}; @@ -23,10 +23,10 @@ use monomorphize::Instance; use type_::Type; use type_of; use rustc::ty; +use rustc::ty::layout::Align; use rustc::hir; -use std::cmp; use std::ffi::{CStr, CString}; use syntax::ast; use syntax::attr; @@ -45,26 +45,26 @@ pub fn bitcast(val: ValueRef, ty: Type) -> ValueRef { fn set_global_alignment(ccx: &CrateContext, gv: ValueRef, - mut align: machine::llalign) { + mut align: Align) { // The target may require greater alignment for globals than the type does. // Note: GCC and Clang also allow `__attribute__((aligned))` on variables, // which can force it to be smaller. Rust doesn't support this yet. if let Some(min) = ccx.sess().target.target.options.min_global_align { match ty::layout::Align::from_bits(min, min) { - Ok(min) => align = cmp::max(align, min.abi() as machine::llalign), + Ok(min) => align = align.max(min), Err(err) => { ccx.sess().err(&format!("invalid minimum global alignment: {}", err)); } } } unsafe { - llvm::LLVMSetAlignment(gv, align); + llvm::LLVMSetAlignment(gv, align.abi() as u32); } } pub fn addr_of_mut(ccx: &CrateContext, cv: ValueRef, - align: machine::llalign, + align: Align, kind: &str) -> ValueRef { unsafe { @@ -82,15 +82,16 @@ pub fn addr_of_mut(ccx: &CrateContext, pub fn addr_of(ccx: &CrateContext, cv: ValueRef, - align: machine::llalign, + align: Align, kind: &str) -> ValueRef { if let Some(&gv) = ccx.const_globals().borrow().get(&cv) { unsafe { // Upgrade the alignment in cases where the same constant is used with different // alignment requirements - if align > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, align); + let llalign = align.abi() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); } } return gv; diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs index a68390eab7f..d2e2e1bbdee 100644 --- a/src/librustc_trans/debuginfo/metadata.rs +++ b/src/librustc_trans/debuginfo/metadata.rs @@ -9,11 +9,10 @@ // except according to those terms. 
use self::RecursiveTypeDescription::*; -use self::MemberOffset::*; use self::MemberDescriptionFactory::*; use self::EnumDiscriminantInfo::*; -use super::utils::{debug_context, DIB, span_start, bytes_to_bits, size_and_align_of, +use super::utils::{debug_context, DIB, span_start, get_namespace_for_item, create_DIArray, is_node_local_to_unit}; use super::namespace::mangled_name_of_item; use super::type_names::compute_debuginfo_type_name; @@ -30,13 +29,11 @@ use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE}; use rustc::ty::fold::TypeVisitor; use rustc::ty::subst::Substs; use rustc::ty::util::TypeIdHasher; -use rustc::hir; use rustc::ich::Fingerprint; -use {type_of, machine, monomorphize}; +use monomorphize; use common::{self, CrateContext}; -use type_::Type; use rustc::ty::{self, AdtKind, Ty}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, Align, LayoutTyper, Size}; use rustc::session::{Session, config}; use rustc::util::nodemap::FxHashMap; use rustc::util::common::path2cstr; @@ -184,7 +181,6 @@ enum RecursiveTypeDescription<'tcx> { unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: DICompositeType, - llvm_type: Type, member_description_factory: MemberDescriptionFactory<'tcx>, }, FinalMetadata(DICompositeType) @@ -195,7 +191,6 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: DICompositeType, - llvm_type: Type, member_description_factory: MemberDescriptionFactory<'tcx>) -> RecursiveTypeDescription<'tcx> { @@ -208,7 +203,6 @@ fn create_and_register_recursive_type_forward_declaration<'a, 'tcx>( unfinished_type, unique_type_id, metadata_stub, - llvm_type, member_description_factory, } } @@ -224,9 +218,7 @@ impl<'tcx> RecursiveTypeDescription<'tcx> { unfinished_type, unique_type_id, metadata_stub, - llvm_type, ref member_description_factory, - .. } => { // Make sure that we have a forward declaration of the type in // the TypeMap so that recursive references are possible. This @@ -251,7 +243,6 @@ impl<'tcx> RecursiveTypeDescription<'tcx> { // ... and attach them to the stub to complete it. set_members_of_composite_type(cx, metadata_stub, - llvm_type, &member_descriptions[..]); return MetadataCreationResult::new(metadata_stub, true); } @@ -274,20 +265,21 @@ macro_rules! 
return_if_metadata_created_in_meantime { fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id: UniqueTypeId, + array_or_slice_type: Ty<'tcx>, element_type: Ty<'tcx>, - len: Option<u64>, span: Span) -> MetadataCreationResult { let element_type_metadata = type_metadata(cx, element_type, span); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let element_llvm_type = type_of::type_of(cx, element_type); - let (element_type_size, element_type_align) = size_and_align_of(cx, element_llvm_type); + let (size, align) = cx.size_and_align_of(array_or_slice_type); - let (array_size_in_bytes, upper_bound) = match len { - Some(len) => (element_type_size * len, len as c_longlong), - None => (0, -1) + let upper_bound = match array_or_slice_type.sty { + ty::TyArray(_, len) => { + len.val.to_const_int().unwrap().to_u64().unwrap() as c_longlong + } + _ => -1 }; let subrange = unsafe { @@ -298,8 +290,8 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let metadata = unsafe { llvm::LLVMRustDIBuilderCreateArrayType( DIB(cx), - bytes_to_bits(array_size_in_bytes), - bytes_to_bits(element_type_align), + size.bits(), + align.abi_bits() as u32, element_type_metadata, subscripts) }; @@ -308,66 +300,52 @@ fn fixed_vec_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } fn vec_slice_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - vec_type: Ty<'tcx>, + slice_ptr_type: Ty<'tcx>, element_type: Ty<'tcx>, unique_type_id: UniqueTypeId, span: Span) -> MetadataCreationResult { - let data_ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { - ty: element_type, - mutbl: hir::MutImmutable - }); + let data_ptr_type = cx.tcx().mk_imm_ptr(element_type); - let element_type_metadata = type_metadata(cx, data_ptr_type, span); + let data_ptr_metadata = type_metadata(cx, data_ptr_type, span); return_if_metadata_created_in_meantime!(cx, unique_type_id); - let slice_llvm_type = type_of::type_of(cx, vec_type); - let slice_type_name = compute_debuginfo_type_name(cx, vec_type, true); + let slice_type_name = compute_debuginfo_type_name(cx, slice_ptr_type, true); + + let (pointer_size, pointer_align) = cx.size_and_align_of(data_ptr_type); + let (usize_size, usize_align) = cx.size_and_align_of(cx.tcx().types.usize); - let member_llvm_types = slice_llvm_type.field_types(); - assert!(slice_layout_is_correct(cx, - &member_llvm_types[..], - element_type)); let member_descriptions = [ MemberDescription { name: "data_ptr".to_string(), - llvm_type: member_llvm_types[0], - type_metadata: element_type_metadata, - offset: ComputedMemberOffset, + type_metadata: data_ptr_metadata, + offset: Size::from_bytes(0), + size: pointer_size, + align: pointer_align, flags: DIFlags::FlagZero, }, MemberDescription { name: "length".to_string(), - llvm_type: member_llvm_types[1], type_metadata: type_metadata(cx, cx.tcx().types.usize, span), - offset: ComputedMemberOffset, + offset: pointer_size, + size: usize_size, + align: usize_align, flags: DIFlags::FlagZero, }, ]; - assert!(member_descriptions.len() == member_llvm_types.len()); - let file_metadata = unknown_file_metadata(cx); let metadata = composite_type_metadata(cx, - slice_llvm_type, + slice_ptr_type, &slice_type_name[..], unique_type_id, &member_descriptions, NO_SCOPE_METADATA, file_metadata, span); - return MetadataCreationResult::new(metadata, false); - - fn slice_layout_is_correct<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - member_llvm_types: &[Type], - element_type: Ty<'tcx>) - -> bool { - member_llvm_types.len() == 2 && - member_llvm_types[0] == type_of::type_of(cx, 
element_type).ptr_to() && - member_llvm_types[1] == cx.isize_ty() - } + MetadataCreationResult::new(metadata, false) } fn subroutine_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, @@ -436,38 +414,38 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let trait_type_name = compute_debuginfo_type_name(cx, trait_object_type, false); - let trait_llvm_type = type_of::type_of(cx, trait_object_type); let file_metadata = unknown_file_metadata(cx); - - let ptr_type = cx.tcx().mk_ptr(ty::TypeAndMut { - ty: cx.tcx().types.u8, - mutbl: hir::MutImmutable - }); - let ptr_type_metadata = type_metadata(cx, ptr_type, syntax_pos::DUMMY_SP); - let llvm_type = type_of::type_of(cx, ptr_type); + let layout = cx.layout_of(cx.tcx().mk_mut_ptr(trait_type)); assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); + + let data_ptr_field = layout.field(cx, 0); + let vtable_field = layout.field(cx, 1); let member_descriptions = [ MemberDescription { name: "pointer".to_string(), - llvm_type: llvm_type, - type_metadata: ptr_type_metadata, - offset: ComputedMemberOffset, + type_metadata: type_metadata(cx, + cx.tcx().mk_mut_ptr(cx.tcx().types.u8), + syntax_pos::DUMMY_SP), + offset: layout.field_offset(cx, 0), + size: data_ptr_field.size(cx), + align: data_ptr_field.align(cx), flags: DIFlags::FlagArtificial, }, MemberDescription { name: "vtable".to_string(), - llvm_type: llvm_type, - type_metadata: ptr_type_metadata, - offset: ComputedMemberOffset, + type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP), + offset: layout.field_offset(cx, 1), + size: vtable_field.size(cx), + align: vtable_field.align(cx), flags: DIFlags::FlagArtificial, }, ]; composite_type_metadata(cx, - trait_llvm_type, + trait_object_type, &trait_type_name[..], unique_type_id, &member_descriptions, @@ -556,15 +534,12 @@ pub fn type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty::TyTuple(ref elements, _) if elements.is_empty() => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) } - ty::TyArray(typ, len) => { - let len = len.val.to_const_int().unwrap().to_u64().unwrap(); - fixed_vec_metadata(cx, unique_type_id, typ, Some(len), usage_site_span) - } + ty::TyArray(typ, _) | ty::TySlice(typ) => { - fixed_vec_metadata(cx, unique_type_id, typ, None, usage_site_span) + fixed_vec_metadata(cx, unique_type_id, t, typ, usage_site_span) } ty::TyStr => { - fixed_vec_metadata(cx, unique_type_id, cx.tcx().types.i8, None, usage_site_span) + fixed_vec_metadata(cx, unique_type_id, t, cx.tcx().types.i8, usage_site_span) } ty::TyDynamic(..) 
=> { MetadataCreationResult::new( @@ -770,15 +745,14 @@ fn basic_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, _ => bug!("debuginfo::basic_type_metadata - t is invalid type") }; - let llvm_type = type_of::type_of(cx, t); - let (size, align) = size_and_align_of(cx, llvm_type); + let (size, align) = cx.size_and_align_of(t); let name = CString::new(name).unwrap(); let ty_metadata = unsafe { llvm::LLVMRustDIBuilderCreateBasicType( DIB(cx), name.as_ptr(), - bytes_to_bits(size), - bytes_to_bits(align), + size.bits(), + align.abi_bits() as u32, encoding) }; @@ -790,29 +764,25 @@ fn foreign_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, unique_type_id: UniqueTypeId) -> DIType { debug!("foreign_type_metadata: {:?}", t); - let llvm_type = type_of::type_of(cx, t); - let name = compute_debuginfo_type_name(cx, t, false); - create_struct_stub(cx, llvm_type, &name, unique_type_id, NO_SCOPE_METADATA) + create_struct_stub(cx, t, &name, unique_type_id, NO_SCOPE_METADATA) } fn pointer_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, pointer_type: Ty<'tcx>, pointee_type_metadata: DIType) -> DIType { - let pointer_llvm_type = type_of::type_of(cx, pointer_type); - let (pointer_size, pointer_align) = size_and_align_of(cx, pointer_llvm_type); + let (pointer_size, pointer_align) = cx.size_and_align_of(pointer_type); let name = compute_debuginfo_type_name(cx, pointer_type, false); let name = CString::new(name).unwrap(); - let ptr_metadata = unsafe { + unsafe { llvm::LLVMRustDIBuilderCreatePointerType( DIB(cx), pointee_type_metadata, - bytes_to_bits(pointer_size), - bytes_to_bits(pointer_align), + pointer_size.bits(), + pointer_align.abi_bits() as u32, name.as_ptr()) - }; - return ptr_metadata; + } } pub fn compile_unit_metadata(scc: &SharedCrateContext, @@ -907,21 +877,15 @@ impl MetadataCreationResult { } } -#[derive(Debug)] -enum MemberOffset { - FixedMemberOffset { bytes: usize }, - // For ComputedMemberOffset, the offset is read from the llvm type definition. - ComputedMemberOffset -} - // Description of a type member, which can either be a regular field (as in // structs or tuples) or an enum variant. 
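The `MemberDescription` struct this comment introduces (defined just below) now records `offset`, `size`, and `align` outright, so `set_members_of_composite_type` no longer reads offsets back out of LLVM struct types. As a rough, hypothetical illustration of what the triple means, here is a toy C-style layout walk; the real code takes these values from the computed layout (`offsets[i]`, `cx.size_and_align_of(fty)`):

```rust
#[derive(Debug, PartialEq)]
struct MemberInfo { offset: u64, size: u64, align: u64 }

// Toy layout walk (not rustc code): place each field at the next
// suitably aligned offset.
fn c_style_layout(fields: &[(u64, u64)]) -> Vec<MemberInfo> {
    let mut offset = 0;
    fields.iter().map(|&(size, align)| {
        offset = (offset + align - 1) / align * align; // round up to `align`
        let m = MemberInfo { offset, size, align };
        offset += size;
        m
    }).collect()
}

fn main() {
    // (u8, u32): the second member lands at offset 4, not 1.
    assert_eq!(c_style_layout(&[(1, 1), (4, 4)])[1],
               MemberInfo { offset: 4, size: 4, align: 4 });
}
```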
#[derive(Debug)] struct MemberDescription { name: String, - llvm_type: Type, type_metadata: DIType, - offset: MemberOffset, + offset: Size, + size: Size, + align: Align, flags: DIFlags, } @@ -998,13 +962,13 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { }; let fty = monomorphize::field_ty(cx.tcx(), self.substs, f); - let offset = FixedMemberOffset { bytes: offsets[i].bytes() as usize}; - + let (size, align) = cx.size_and_align_of(fty); MemberDescription { name, - llvm_type: type_of::in_memory_type_of(cx, fty), type_metadata: type_metadata(cx, fty, self.span), - offset, + offset: offsets[i], + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1018,7 +982,6 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let struct_name = compute_debuginfo_type_name(cx, struct_type, false); - let struct_llvm_type = type_of::in_memory_type_of(cx, struct_type); let (struct_def_id, variant, substs) = match struct_type.sty { ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), @@ -1028,7 +991,7 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let containing_scope = get_namespace_for_item(cx, struct_def_id); let struct_metadata_stub = create_struct_stub(cx, - struct_llvm_type, + struct_type, &struct_name, unique_type_id, containing_scope); @@ -1038,7 +1001,6 @@ fn prepare_struct_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, struct_type, unique_type_id, struct_metadata_stub, - struct_llvm_type, StructMDF(StructMemberDescriptionFactory { ty: struct_type, variant, @@ -1069,15 +1031,14 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { bug!("{} is not a tuple", self.ty); }; - self.component_types - .iter() - .enumerate() - .map(|(i, &component_type)| { + self.component_types.iter().enumerate().map(|(i, &component_type)| { + let (size, align) = cx.size_and_align_of(component_type); MemberDescription { name: format!("__{}", i), - llvm_type: type_of::type_of(cx, component_type), type_metadata: type_metadata(cx, component_type, self.span), - offset: FixedMemberOffset { bytes: offsets[i].bytes() as usize }, + offset: offsets[i], + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1091,18 +1052,16 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let tuple_name = compute_debuginfo_type_name(cx, tuple_type, false); - let tuple_llvm_type = type_of::type_of(cx, tuple_type); create_and_register_recursive_type_forward_declaration( cx, tuple_type, unique_type_id, create_struct_stub(cx, - tuple_llvm_type, + tuple_type, &tuple_name[..], unique_type_id, NO_SCOPE_METADATA), - tuple_llvm_type, TupleMDF(TupleMemberDescriptionFactory { ty: tuple_type, component_types: component_types.to_vec(), @@ -1126,11 +1085,13 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { -> Vec<MemberDescription> { self.variant.fields.iter().map(|field| { let fty = monomorphize::field_ty(cx.tcx(), self.substs, field); + let (size, align) = cx.size_and_align_of(fty); MemberDescription { name: field.name.to_string(), - llvm_type: type_of::type_of(cx, fty), type_metadata: type_metadata(cx, fty, self.span), - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size, + align, flags: DIFlags::FlagZero, } }).collect() @@ -1143,7 +1104,6 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span: Span) -> RecursiveTypeDescription<'tcx> { let union_name = compute_debuginfo_type_name(cx, union_type, false); - let union_llvm_type = 
type_of::in_memory_type_of(cx, union_type); let (union_def_id, variant, substs) = match union_type.sty { ty::TyAdt(def, substs) => (def.did, def.struct_variant(), substs), @@ -1153,7 +1113,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let containing_scope = get_namespace_for_item(cx, union_def_id); let union_metadata_stub = create_union_stub(cx, - union_llvm_type, + union_type, &union_name, unique_type_id, containing_scope); @@ -1163,7 +1123,6 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, union_type, unique_type_id, union_metadata_stub, - union_llvm_type, UnionMDF(UnionMemberDescriptionFactory { variant, substs, @@ -1206,9 +1165,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { .iter() .enumerate() .map(|(i, struct_def)| { - let (variant_type_metadata, - variant_llvm_type, - member_desc_factory) = + let (variant_type_metadata, member_desc_factory) = describe_enum_variant(cx, self.enum_type, struct_def, @@ -1222,13 +1179,13 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, &member_descriptions); MemberDescription { name: "".to_string(), - llvm_type: variant_llvm_type, type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size: struct_def.stride(), + align: struct_def.align, flags: DIFlags::FlagZero } }).collect() @@ -1239,9 +1196,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { if adt.variants.is_empty() { vec![] } else { - let (variant_type_metadata, - variant_llvm_type, - member_description_factory) = + let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, self.enum_type, variant, @@ -1255,14 +1210,14 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, &member_descriptions[..]); vec![ MemberDescription { name: "".to_string(), - llvm_type: variant_llvm_type, type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size: variant.stride(), + align: variant.align, flags: DIFlags::FlagZero } ] @@ -1278,15 +1233,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { let non_null_variant_name = non_null_variant.name.as_str(); // The llvm type and metadata of the pointer - let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0] ); - let non_null_llvm_type = type_of::type_of(cx, nnty); + let nnty = monomorphize::field_ty(cx.tcx(), &substs, &non_null_variant.fields[0]); + let (size, align) = cx.size_and_align_of(nnty); let non_null_type_metadata = type_metadata(cx, nnty, self.span); - // The type of the artificial struct wrapping the pointer - let artificial_struct_llvm_type = Type::struct_(cx, - &[non_null_llvm_type], - false); - // For the metadata of the wrapper struct, we need to create a // MemberDescription of the struct's single field. 
let sole_struct_member_description = MemberDescription { @@ -1297,9 +1247,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { } CtorKind::Const => bug!() }, - llvm_type: non_null_llvm_type, type_metadata: non_null_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size, + align, flags: DIFlags::FlagZero }; @@ -1313,7 +1264,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { // Now we can create the metadata of the artificial struct let artificial_struct_metadata = composite_type_metadata(cx, - artificial_struct_llvm_type, + nnty, &non_null_variant_name, unique_type_id, &[sole_struct_member_description], @@ -1334,9 +1285,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { vec![ MemberDescription { name: union_member_name, - llvm_type: artificial_struct_llvm_type, type_metadata: artificial_struct_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size, + align, flags: DIFlags::FlagZero } ] @@ -1345,7 +1297,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { nndiscr, ref discrfield_source, ..} => { // Create a description of the non-null variant - let (variant_type_metadata, variant_llvm_type, member_description_factory) = + let (variant_type_metadata, member_description_factory) = describe_enum_variant(cx, self.enum_type, struct_def, @@ -1359,7 +1311,6 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { set_members_of_composite_type(cx, variant_type_metadata, - variant_llvm_type, &variant_member_descriptions[..]); // Encode the information about the null variant in the union @@ -1378,9 +1329,10 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> { vec![ MemberDescription { name: union_member_name, - llvm_type: variant_llvm_type, type_metadata: variant_type_metadata, - offset: FixedMemberOffset { bytes: 0 }, + offset: Size::from_bytes(0), + size: struct_def.stride(), + align: struct_def.align, flags: DIFlags::FlagZero } ] @@ -1404,14 +1356,16 @@ impl<'tcx> VariantMemberDescriptionFactory<'tcx> { fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>) -> Vec<MemberDescription> { self.args.iter().enumerate().map(|(i, &(ref name, ty))| { + let (size, align) = cx.size_and_align_of(ty); MemberDescription { name: name.to_string(), - llvm_type: type_of::type_of(cx, ty), type_metadata: match self.discriminant_type_metadata { Some(metadata) if i == 0 => metadata, _ => type_metadata(cx, ty, self.span) }, - offset: FixedMemberOffset { bytes: self.offsets[i].bytes() as usize }, + offset: self.offsets[i], + size, + align, flags: DIFlags::FlagZero } }).collect() @@ -1436,7 +1390,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, discriminant_info: EnumDiscriminantInfo, containing_scope: DIScope, span: Span) - -> (DICompositeType, Type, MemberDescriptionFactory<'tcx>) { + -> (DICompositeType, MemberDescriptionFactory<'tcx>) { let substs = match enum_type.sty { ty::TyAdt(def, s) if def.adt_kind() == AdtKind::Enum => s, ref t @ _ => bug!("{:#?} is not an enum", t) @@ -1456,17 +1410,9 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, }).collect::<Vec<_>>(); if let Some((discr, signed)) = maybe_discr_and_signed { - field_tys.insert(0, discr.to_ty(&cx.tcx(), signed)); + field_tys.insert(0, discr.to_ty(cx.tcx(), signed)); } - - let variant_llvm_type = - Type::struct_(cx, &field_tys - .iter() - .map(|t| type_of::type_of(cx, t)) - .collect::<Vec<_>>() - , - struct_def.packed); // Could do some consistency checks here: size, align, field count, discr type let variant_name = 
variant.name.as_str(); @@ -1478,7 +1424,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, &variant_name); let metadata_stub = create_struct_stub(cx, - variant_llvm_type, + enum_type, &variant_name, unique_type_id, containing_scope); @@ -1526,7 +1472,7 @@ fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, span, }); - (metadata_stub, variant_llvm_type, member_description_factory) + (metadata_stub, member_description_factory) } fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, @@ -1570,12 +1516,11 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, match cached_discriminant_type_metadata { Some(discriminant_type_metadata) => discriminant_type_metadata, None => { - let discriminant_llvm_type = Type::from_integer(cx, inttype); let (discriminant_size, discriminant_align) = - size_and_align_of(cx, discriminant_llvm_type); + (inttype.size(), inttype.align(cx)); let discriminant_base_type_metadata = type_metadata(cx, - inttype.to_ty(&cx.tcx(), signed), + inttype.to_ty(cx.tcx(), signed), syntax_pos::DUMMY_SP); let discriminant_name = get_enum_discriminant_name(cx, enum_def_id); @@ -1587,8 +1532,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - bytes_to_bits(discriminant_size), - bytes_to_bits(discriminant_align), + discriminant_size.bits(), + discriminant_align.abi_bits() as u32, create_DIArray(DIB(cx), &enumerators_metadata), discriminant_base_type_metadata) }; @@ -1615,8 +1560,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ref l @ _ => bug!("Not an enum layout: {:#?}", l) }; - let enum_llvm_type = type_of::type_of(cx, enum_type); - let (enum_type_size, enum_type_align) = size_and_align_of(cx, enum_llvm_type); + let (enum_type_size, enum_type_align) = cx.size_and_align_of(enum_type); let enum_name = CString::new(enum_name).unwrap(); let unique_type_id_str = CString::new( @@ -1629,8 +1573,8 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_name.as_ptr(), file_metadata, UNKNOWN_LINE_NUMBER, - bytes_to_bits(enum_type_size), - bytes_to_bits(enum_type_align), + enum_type_size.bits(), + enum_type_align.abi_bits() as u32, DIFlags::FlagZero, ptr::null_mut(), 0, // RuntimeLang @@ -1642,7 +1586,6 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, enum_type, unique_type_id, enum_metadata, - enum_llvm_type, EnumMDF(EnumMemberDescriptionFactory { enum_type, type_rep: type_rep.layout, @@ -1664,28 +1607,27 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, /// results in a LLVM struct. /// /// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. -fn composite_type_metadata(cx: &CrateContext, - composite_llvm_type: Type, - composite_type_name: &str, - composite_type_unique_id: UniqueTypeId, - member_descriptions: &[MemberDescription], - containing_scope: DIScope, - - // Ignore source location information as long as it - // can't be reconstructed for non-local crates. - _file_metadata: DIFile, - _definition_span: Span) - -> DICompositeType { +fn composite_type_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + composite_type: Ty<'tcx>, + composite_type_name: &str, + composite_type_unique_id: UniqueTypeId, + member_descriptions: &[MemberDescription], + containing_scope: DIScope, + + // Ignore source location information as long as it + // can't be reconstructed for non-local crates. 
+ _file_metadata: DIFile, + _definition_span: Span) + -> DICompositeType { // Create the (empty) struct metadata node ... let composite_type_metadata = create_struct_stub(cx, - composite_llvm_type, + composite_type, composite_type_name, composite_type_unique_id, containing_scope); // ... and immediately create and add the member descriptions. set_members_of_composite_type(cx, composite_type_metadata, - composite_llvm_type, member_descriptions); return composite_type_metadata; @@ -1693,7 +1635,6 @@ fn composite_type_metadata(cx: &CrateContext, fn set_members_of_composite_type(cx: &CrateContext, composite_type_metadata: DICompositeType, - composite_llvm_type: Type, member_descriptions: &[MemberDescription]) { // In some rare cases LLVM metadata uniquing would lead to an existing type // description being used instead of a new one created in @@ -1714,14 +1655,7 @@ fn set_members_of_composite_type(cx: &CrateContext, let member_metadata: Vec<DIDescriptor> = member_descriptions .iter() - .enumerate() - .map(|(i, member_description)| { - let (member_size, member_align) = size_and_align_of(cx, member_description.llvm_type); - let member_offset = match member_description.offset { - FixedMemberOffset { bytes } => bytes as u64, - ComputedMemberOffset => machine::llelement_offset(cx, composite_llvm_type, i) - }; - + .map(|member_description| { let member_name = member_description.name.as_bytes(); let member_name = CString::new(member_name).unwrap(); unsafe { @@ -1731,9 +1665,9 @@ fn set_members_of_composite_type(cx: &CrateContext, member_name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(member_size), - bytes_to_bits(member_align), - bytes_to_bits(member_offset), + member_description.size.bits(), + member_description.align.abi_bits() as u32, + member_description.offset.bits(), member_description.flags, member_description.type_metadata) } @@ -1750,13 +1684,13 @@ fn set_members_of_composite_type(cx: &CrateContext, // A convenience wrapper around LLVMRustDIBuilderCreateStructType(). Does not do // any caching, does not add any fields to the struct. This can be done later // with set_members_of_composite_type(). 
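One unit change worth calling out before `create_struct_stub` below: the DIBuilder wants sizes and alignments in bits, and the deleted `bytes_to_bits` helper used to multiply by 8 at every call site. That factor now lives behind `Size::bits()` and `Align::abi_bits()`. A minimal sketch of the equivalence:

```rust
// Sketch: what `bytes_to_bits` did, now folded into the layout types.
fn bits(bytes: u64) -> u64 { bytes * 8 }

fn main() {
    let (struct_size, struct_align) = (16u64, 8u64); // e.g. &[u8] on x86_64
    assert_eq!(bits(struct_size), 128);  // passed as `struct_size.bits()`
    assert_eq!(bits(struct_align), 64);  // `struct_align.abi_bits() as u32`
}
```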
-fn create_struct_stub(cx: &CrateContext, - struct_llvm_type: Type, - struct_type_name: &str, - unique_type_id: UniqueTypeId, - containing_scope: DIScope) - -> DICompositeType { - let (struct_size, struct_align) = size_and_align_of(cx, struct_llvm_type); +fn create_struct_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + struct_type: Ty<'tcx>, + struct_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (struct_size, struct_align) = cx.size_and_align_of(struct_type); let name = CString::new(struct_type_name).unwrap(); let unique_type_id = CString::new( @@ -1774,8 +1708,8 @@ fn create_struct_stub(cx: &CrateContext, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(struct_size), - bytes_to_bits(struct_align), + struct_size.bits(), + struct_align.abi_bits() as u32, DIFlags::FlagZero, ptr::null_mut(), empty_array, @@ -1787,13 +1721,13 @@ fn create_struct_stub(cx: &CrateContext, return metadata_stub; } -fn create_union_stub(cx: &CrateContext, - union_llvm_type: Type, - union_type_name: &str, - unique_type_id: UniqueTypeId, - containing_scope: DIScope) - -> DICompositeType { - let (union_size, union_align) = size_and_align_of(cx, union_llvm_type); +fn create_union_stub<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + union_type: Ty<'tcx>, + union_type_name: &str, + unique_type_id: UniqueTypeId, + containing_scope: DIScope) + -> DICompositeType { + let (union_size, union_align) = cx.size_and_align_of(union_type); let name = CString::new(union_type_name).unwrap(); let unique_type_id = CString::new( @@ -1811,8 +1745,8 @@ fn create_union_stub(cx: &CrateContext, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(union_size), - bytes_to_bits(union_align), + union_size.bits(), + union_align.abi_bits() as u32, DIFlags::FlagZero, empty_array, 0, // RuntimeLang @@ -1867,7 +1801,7 @@ pub fn create_global_var_metadata(cx: &CrateContext, is_local_to_unit, global, ptr::null_mut(), - global_align, + global_align.abi() as u32, ); } } @@ -1899,8 +1833,6 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP); - let llvm_vtable_type = Type::vtable_ptr(cx).element_type(); - let (struct_size, struct_align) = size_and_align_of(cx, llvm_vtable_type); unsafe { // LLVMRustDIBuilderCreateStructType() wants an empty array. 
A null @@ -1919,8 +1851,8 @@ pub fn create_vtable_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, name.as_ptr(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, - bytes_to_bits(struct_size), - bytes_to_bits(struct_align), + Size::from_bytes(0).bits(), + cx.tcx().data_layout.pointer_align.abi_bits() as u32, DIFlags::FlagArtificial, ptr::null_mut(), empty_array, diff --git a/src/librustc_trans/debuginfo/mod.rs b/src/librustc_trans/debuginfo/mod.rs index 15b299674ee..1ca12771dd4 100644 --- a/src/librustc_trans/debuginfo/mod.rs +++ b/src/librustc_trans/debuginfo/mod.rs @@ -499,7 +499,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, cx.sess().opts.optimize != config::OptLevel::No, DIFlags::FlagZero, argument_index, - align, + align.abi() as u32, ) }; source_loc::set_debug_location(bcx, diff --git a/src/librustc_trans/debuginfo/utils.rs b/src/librustc_trans/debuginfo/utils.rs index ad4fdfca726..95427d9b3cd 100644 --- a/src/librustc_trans/debuginfo/utils.rs +++ b/src/librustc_trans/debuginfo/utils.rs @@ -18,15 +18,11 @@ use rustc::ty::DefIdTree; use llvm; use llvm::debuginfo::{DIScope, DIBuilderRef, DIDescriptor, DIArray}; -use machine; use common::{CrateContext}; -use type_::Type; use syntax_pos::{self, Span}; use syntax::ast; -use std::ops; - pub fn is_node_local_to_unit(cx: &CrateContext, node_id: ast::NodeId) -> bool { // The is_local_to_unit flag indicates whether a function is local to the @@ -53,15 +49,6 @@ pub fn span_start(cx: &CrateContext, span: Span) -> syntax_pos::Loc { cx.sess().codemap().lookup_char_pos(span.lo()) } -pub fn size_and_align_of(cx: &CrateContext, llvm_type: Type) -> (u64, u32) { - (machine::llsize_of_alloc(cx, llvm_type), machine::llalign_of_min(cx, llvm_type)) -} - -pub fn bytes_to_bits<T>(bytes: T) -> T - where T: ops::Mul<Output=T> + From<u8> { - bytes * 8u8.into() -} - #[inline] pub fn debug_context<'a, 'tcx>(cx: &'a CrateContext<'a, 'tcx>) -> &'a CrateDebugContext<'tcx> { diff --git a/src/librustc_trans/glue.rs b/src/librustc_trans/glue.rs index 453b98a1d74..597d8c587e9 100644 --- a/src/librustc_trans/glue.rs +++ b/src/librustc_trans/glue.rs @@ -29,12 +29,11 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf debug!("calculate size of DST: {}; with lost info: {:?}", t, Value(info)); if bcx.ccx.shared().type_is_sized(t) { - let size = bcx.ccx.size_of(t); - let align = bcx.ccx.align_of(t); - debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}", + let (size, align) = bcx.ccx.size_and_align_of(t); + debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, Value(info), size, align); - let size = C_usize(bcx.ccx, size); - let align = C_usize(bcx.ccx, align as u64); + let size = C_usize(bcx.ccx, size.bytes()); + let align = C_usize(bcx.ccx, align.abi()); return (size, align); } assert!(!info.is_null()); @@ -122,8 +121,9 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf let unit = t.sequence_element_type(bcx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. 
- (bcx.mul(info, C_usize(bcx.ccx, bcx.ccx.size_of(unit))), - C_usize(bcx.ccx, bcx.ccx.align_of(unit) as u64)) + let (size, align) = bcx.ccx.size_and_align_of(unit); + (bcx.mul(info, C_usize(bcx.ccx, size.bytes())), + C_usize(bcx.ccx, align.abi())) } _ => bug!("Unexpected unsized type, found {}", t) } diff --git a/src/librustc_trans/intrinsic.rs b/src/librustc_trans/intrinsic.rs index daeb0dd680f..c66a8ae2fcc 100644 --- a/src/librustc_trans/intrinsic.rs +++ b/src/librustc_trans/intrinsic.rs @@ -21,9 +21,9 @@ use common::*; use declare; use glue; use type_of; -use machine; use type_::Type; use rustc::ty::{self, Ty}; +use rustc::ty::layout::HasDataLayout; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; @@ -125,7 +125,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "try" => { try_intrinsic(bcx, ccx, llargs[0], llargs[1], llargs[2], llresult); - C_nil(ccx) + return; } "breakpoint" => { let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); @@ -133,42 +133,39 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, } "size_of" => { let tp_ty = substs.type_at(0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + C_usize(ccx, ccx.size_of(tp_ty).bytes()) } "size_of_val" => { let tp_ty = substs.type_at(0); if bcx.ccx.shared().type_is_sized(tp_ty) { - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llsize_of_alloc(ccx, lltp_ty)) + C_usize(ccx, ccx.size_of(tp_ty).bytes()) } else if bcx.ccx.shared().type_has_metadata(tp_ty) { let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llsize } else { - C_usize(ccx, 0u64) + C_usize(ccx, 0) } } "min_align_of" => { let tp_ty = substs.type_at(0); - C_usize(ccx, ccx.align_of(tp_ty) as u64) + C_usize(ccx, ccx.align_of(tp_ty).abi()) } "min_align_of_val" => { let tp_ty = substs.type_at(0); if bcx.ccx.shared().type_is_sized(tp_ty) { - C_usize(ccx, ccx.align_of(tp_ty) as u64) + C_usize(ccx, ccx.align_of(tp_ty).abi()) } else if bcx.ccx.shared().type_has_metadata(tp_ty) { let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); llalign } else { - C_usize(ccx, 1u64) + C_usize(ccx, 1) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - let lltp_ty = type_of::type_of(ccx, tp_ty); - C_usize(ccx, machine::llalign_of_pref(ccx, lltp_ty) as u64) + C_usize(ccx, ccx.align_of(tp_ty).pref()) } "type_name" => { let tp_ty = substs.type_at(0); @@ -187,11 +184,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, // large quantities of `mov [byte ptr foo],0` in the generated code.) 
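The `size_of_val` hunk above now branches on how much layout information is statically known. An illustration (plain Rust, not the rustc code) of the three cases, using the hypothetical `Val` classification below:

```rust
enum Val {
    Sized { size: u64 },                // ccx.size_of(t).bytes()
    Slice { elem_size: u64, len: u64 }, // glue::size_and_align_of_dst
    NoMetadata,                         // e.g. a foreign opaque type
}

fn size_of_val(v: &Val) -> u64 {
    match *v {
        Val::Sized { size } => size,
        Val::Slice { elem_size, len } => elem_size * len,
        Val::NoMetadata => 0, // C_usize(ccx, 0)
    }
}

fn main() {
    assert_eq!(size_of_val(&Val::Slice { elem_size: 4, len: 10 }), 40);
}
```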
memset_intrinsic(bcx, false, ty, llresult, C_u8(ccx, 0), C_usize(ccx, 1)); } - C_nil(ccx) + return; } // Effectively no-ops "uninit" => { - C_nil(ccx) + return; } "needs_drop" => { let tp_ty = substs.type_at(0); @@ -232,11 +229,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let tp_ty = substs.type_at(0); let mut ptr = llargs[0]; if let Some(ty) = fn_ty.ret.cast { - ptr = bcx.pointercast(ptr, ty.ptr_to()); + ptr = bcx.pointercast(ptr, ty.llvm_type(ccx).ptr_to()); } let load = bcx.volatile_load(ptr); unsafe { - llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty)); + llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty).abi() as u32); } to_immediate(bcx, load, tp_ty) }, @@ -249,19 +246,18 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let val = if fn_ty.args[1].is_indirect() { bcx.load(llargs[1], None) } else { - if !type_is_zero_size(ccx, tp_ty) { - from_immediate(bcx, llargs[1]) - } else { - C_nil(ccx) + if type_is_zero_size(ccx, tp_ty) { + return; } + from_immediate(bcx, llargs[1]) }; let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to()); let store = bcx.volatile_store(val, ptr); unsafe { - llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty)); + llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty).abi() as u32); } } - C_nil(ccx) + return; }, "prefetch_read_data" | "prefetch_write_data" | "prefetch_read_instruction" | "prefetch_write_instruction" => { @@ -279,8 +275,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" => { - let sty = &arg_tys[0].sty; - match int_type_width_signed(sty, ccx) { + let ty = arg_tys[0]; + match int_type_width_signed(ty, ccx) { Some((width, signed)) => match name { "ctlz" | "cttz" => { @@ -317,7 +313,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, bcx.store(result, bcx.struct_gep(llresult, 0), None); bcx.store(overflow, bcx.struct_gep(llresult, 1), None); - C_nil(bcx.ccx) + return; }, "overflowing_add" => bcx.add(llargs[0], llargs[1]), "overflowing_sub" => bcx.sub(llargs[0], llargs[1]), @@ -347,8 +343,8 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, span_invalid_monomorphization_error( tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); - C_nil(ccx) + expected basic integer type, found `{}`", name, ty)); + return; } } @@ -370,7 +366,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ expected basic float type, found `{}`", name, sty)); - C_nil(ccx) + return; } } @@ -399,11 +395,14 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, bcx.select(is_zero, zero, bcx.sub(offset, llargs[1])) } name if name.starts_with("simd_") => { - generic_simd_intrinsic(bcx, name, - callee_ty, - &llargs, - ret_ty, llret_ty, - span) + match generic_simd_intrinsic(bcx, name, + callee_ty, + &llargs, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return + } } // This requires that atomic intrinsics follow a specific naming pattern: // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst @@ -437,16 +436,16 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, _ => ccx.sess().fatal("Atomic intrinsic not in correct format"), }; - let invalid_monomorphization = |sty| { + let 
invalid_monomorphization = |ty| { span_invalid_monomorphization_error(tcx.sess, span, &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, sty)); + expected basic integer type, found `{}`", name, ty)); }; match split[1] { "cxchg" | "cxchgweak" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; let val = bcx.atomic_cmpxchg(llargs[0], llargs[1], llargs[2], order, failorder, weak); @@ -454,40 +453,41 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let success = bcx.zext(bcx.extract_value(val, 1), Type::bool(bcx.ccx)); bcx.store(result, bcx.struct_gep(llresult, 0), None); bcx.store(success, bcx.struct_gep(llresult, 1), None); + return; } else { - invalid_monomorphization(sty); + return invalid_monomorphization(ty); } - C_nil(ccx) } "load" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { - bcx.atomic_load(llargs[0], order) + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { + let align = ccx.align_of(ty); + bcx.atomic_load(llargs[0], order, align) } else { - invalid_monomorphization(sty); - C_nil(ccx) + return invalid_monomorphization(ty); } } "store" => { - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { - bcx.atomic_store(llargs[1], llargs[0], order); + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { + let align = ccx.align_of(ty); + bcx.atomic_store(llargs[1], llargs[0], order, align); + return; } else { - invalid_monomorphization(sty); + return invalid_monomorphization(ty); } - C_nil(ccx) } "fence" => { bcx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); - C_nil(ccx) + return; } "singlethreadfence" => { bcx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); - C_nil(ccx) + return; } // These are all AtomicRMW ops @@ -507,12 +507,11 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, _ => ccx.sess().fatal("unknown atomic operation") }; - let sty = &substs.type_at(0).sty; - if int_type_width_signed(sty, ccx).is_some() { + let ty = substs.type_at(0); + if int_type_width_signed(ty, ccx).is_some() { bcx.atomic_rmw(atom_op, llargs[0], llargs[1], order) } else { - invalid_monomorphization(sty); - C_nil(ccx) + return invalid_monomorphization(ty); } } } @@ -662,16 +661,16 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let (dest, align) = lval.trans_field_ptr(bcx, i); bcx.store(val, dest, align.to_align()); } - C_nil(ccx) + return; } _ => val, } } }; - if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { + if !fn_ty.ret.is_ignore() { if let Some(ty) = fn_ty.ret.cast { - let ptr = bcx.pointercast(llresult, ty.ptr_to()); + let ptr = bcx.pointercast(llresult, ty.llvm_type(ccx).ptr_to()); bcx.store(llval, ptr, Some(ccx.align_of(ret_ty))); } else { store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty); @@ -682,16 +681,15 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, allow_overlap: bool, volatile: bool, - tp_ty: Ty<'tcx>, + ty: Ty<'tcx>, dst: ValueRef, src: ValueRef, count: ValueRef) -> ValueRef { let ccx = bcx.ccx; - let lltp_ty = type_of::type_of(ccx, tp_ty); - let align = C_i32(ccx, ccx.align_of(tp_ty) as i32); - let size = 
machine::llsize_of(ccx, lltp_ty); - let int_size = machine::llbitsize_of_real(ccx, ccx.isize_ty()); + let (size, align) = ccx.size_and_align_of(ty); + let size = C_usize(ccx, size.bytes()); + let align = C_i32(ccx, align.abi() as i32); let operation = if allow_overlap { "memmove" @@ -699,7 +697,8 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, "memcpy" }; - let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size); + let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, + ccx.data_layout().pointer_size.bits()); let dst_ptr = bcx.pointercast(dst, Type::i8p(ccx)); let src_ptr = bcx.pointercast(src, Type::i8p(ccx)); @@ -723,9 +722,9 @@ fn memset_intrinsic<'a, 'tcx>( count: ValueRef ) -> ValueRef { let ccx = bcx.ccx; - let align = C_i32(ccx, ccx.align_of(ty) as i32); - let lltp_ty = type_of::type_of(ccx, ty); - let size = machine::llsize_of(ccx, lltp_ty); + let (size, align) = ccx.size_and_align_of(ty); + let size = C_usize(ccx, size.bytes()); + let align = C_i32(ccx, align.abi() as i32); let dst = bcx.pointercast(dst, Type::i8p(ccx)); call_memset(bcx, dst, val, bcx.mul(size, count), align, volatile) } @@ -975,7 +974,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( ret_ty: Ty<'tcx>, llret_ty: Type, span: Span -) -> ValueRef { +) -> Result<ValueRef, ()> { // macros for error handling: macro_rules! emit_error { ($msg: tt) => { @@ -993,7 +992,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( ($cond: expr, $($fmt: tt)*) => { if !$cond { emit_error!($($fmt)*); - return C_nil(bcx.ccx) + return Err(()); } } } @@ -1039,12 +1038,12 @@ fn generic_simd_intrinsic<'a, 'tcx>( ret_ty, ret_ty.simd_type(tcx)); - return compare_simd_types(bcx, - llargs[0], - llargs[1], - in_elem, - llret_ty, - cmp_op) + return Ok(compare_simd_types(bcx, + llargs[0], + llargs[1], + in_elem, + llret_ty, + cmp_op)) } if name.starts_with("simd_shuffle") { @@ -1090,23 +1089,23 @@ fn generic_simd_intrinsic<'a, 'tcx>( .collect(); let indices = match indices { Some(i) => i, - None => return C_null(llret_ty) + None => return Ok(C_null(llret_ty)) }; - return bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices)) + return Ok(bcx.shuffle_vector(llargs[0], llargs[1], C_vector(&indices))) } if name == "simd_insert" { require!(in_elem == arg_tys[2], "expected inserted type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, arg_tys[2]); - return bcx.insert_element(llargs[0], llargs[2], llargs[1]) + return Ok(bcx.insert_element(llargs[0], llargs[2], llargs[1])) } if name == "simd_extract" { require!(ret_ty == in_elem, "expected return type `{}` (element of input `{}`), found `{}`", in_elem, in_ty, ret_ty); - return bcx.extract_element(llargs[0], llargs[1]) + return Ok(bcx.extract_element(llargs[0], llargs[1])) } if name == "simd_cast" { @@ -1120,7 +1119,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( // casting cares about nominal type, not just structural type let out_elem = ret_ty.simd_type(tcx); - if in_elem == out_elem { return llargs[0]; } + if in_elem == out_elem { return Ok(llargs[0]); } enum Style { Float, Int(/* is signed? 
*/ bool), Unsupported } @@ -1141,7 +1140,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( match (in_style, out_style) { (Style::Int(in_is_signed), Style::Int(_)) => { - return match in_width.cmp(&out_width) { + return Ok(match in_width.cmp(&out_width) { Ordering::Greater => bcx.trunc(llargs[0], llret_ty), Ordering::Equal => llargs[0], Ordering::Less => if in_is_signed { @@ -1149,28 +1148,28 @@ fn generic_simd_intrinsic<'a, 'tcx>( } else { bcx.zext(llargs[0], llret_ty) } - } + }) } (Style::Int(in_is_signed), Style::Float) => { - return if in_is_signed { + return Ok(if in_is_signed { bcx.sitofp(llargs[0], llret_ty) } else { bcx.uitofp(llargs[0], llret_ty) - } + }) } (Style::Float, Style::Int(out_is_signed)) => { - return if out_is_signed { + return Ok(if out_is_signed { bcx.fptosi(llargs[0], llret_ty) } else { bcx.fptoui(llargs[0], llret_ty) - } + }) } (Style::Float, Style::Float) => { - return match in_width.cmp(&out_width) { + return Ok(match in_width.cmp(&out_width) { Ordering::Greater => bcx.fptrunc(llargs[0], llret_ty), Ordering::Equal => llargs[0], Ordering::Less => bcx.fpext(llargs[0], llret_ty) - } + }) } _ => {/* Unsupported. Fallthrough. */} } @@ -1186,7 +1185,7 @@ fn generic_simd_intrinsic<'a, 'tcx>( match in_elem.sty { $( $(ty::$p(_))|* => { - return bcx.$call(llargs[0], llargs[1]) + return Ok(bcx.$call(llargs[0], llargs[1])) } )* _ => {}, @@ -1213,15 +1212,13 @@ fn generic_simd_intrinsic<'a, 'tcx>( span_bug!(span, "unknown SIMD intrinsic"); } -// Returns the width of an int TypeVariant, and if it's signed or not +// Returns the width of an int Ty, and if it's signed or not // Returns None if the type is not an integer // FIXME: there’s multiple of this functions, investigate using some of the already existing // stuffs. -fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) - -> Option<(u64, bool)> { - use rustc::ty::{TyInt, TyUint}; - match *sty { - TyInt(t) => Some((match t { +fn int_type_width_signed(ty: Ty, ccx: &CrateContext) -> Option<(u64, bool)> { + match ty.sty { + ty::TyInt(t) => Some((match t { ast::IntTy::Is => { match &ccx.tcx().sess.target.target.target_pointer_width[..] { "16" => 16, @@ -1236,7 +1233,7 @@ fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext) ast::IntTy::I64 => 64, ast::IntTy::I128 => 128, }, true)), - TyUint(t) => Some((match t { + ty::TyUint(t) => Some((match t { ast::UintTy::Us => { match &ccx.tcx().sess.target.target.target_pointer_width[..] { "16" => 16, diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs index 96e11d36642..73e03dc0691 100644 --- a/src/librustc_trans/lib.rs +++ b/src/librustc_trans/lib.rs @@ -136,7 +136,6 @@ mod declare; mod glue; mod intrinsic; mod llvm_util; -mod machine; mod metadata; mod meth; mod mir; diff --git a/src/librustc_trans/machine.rs b/src/librustc_trans/machine.rs deleted file mode 100644 index bc383abc7e0..00000000000 --- a/src/librustc_trans/machine.rs +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or -// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license -// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -// Information concerning the machine representation of various types. 
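The file deleted here held thin wrappers over LLVM's target-data queries; the rest of the patch replaces each one with the figure rustc already computed itself: `llsize_of_alloc` with `ccx.size_of(ty).bytes()`, `llbitsize_of_real` with `Size::bits()`, `llalign_of_min` with `ccx.align_of(ty).abi()`, `llalign_of_pref` with `Align::pref()`, and `llelement_offset` with `layout.field_offset(..)`.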
- -#![allow(non_camel_case_types)] - -use llvm::{self, ValueRef}; -use common::*; - -use type_::Type; - -pub type llbits = u64; -pub type llsize = u64; -pub type llalign = u32; - -// ______________________________________________________________________ -// compute sizeof / alignof - -// Returns the number of bytes between successive elements of type T in an -// array of T. This is the "ABI" size. It includes any ABI-mandated padding. -pub fn llsize_of_alloc(cx: &CrateContext, ty: Type) -> llsize { - unsafe { - return llvm::LLVMABISizeOfType(cx.td(), ty.to_ref()); - } -} - -/// Returns the "real" size of the type in bits. -pub fn llbitsize_of_real(cx: &CrateContext, ty: Type) -> llbits { - unsafe { - llvm::LLVMSizeOfTypeInBits(cx.td(), ty.to_ref()) - } -} - -/// Returns the size of the type as an LLVM constant integer value. -pub fn llsize_of(cx: &CrateContext, ty: Type) -> ValueRef { - // Once upon a time, this called LLVMSizeOf, which does a - // getelementptr(1) on a null pointer and casts to an int, in - // order to obtain the type size as a value without requiring the - // target data layout. But we have the target data layout, so - // there's no need for that contrivance. The instruction - // selection DAG generator would flatten that GEP(1) node into a - // constant of the type's alloc size, so let's save it some work. - return C_usize(cx, llsize_of_alloc(cx, ty)); -} - -// Returns the preferred alignment of the given type for the current target. -// The preferred alignment may be larger than the alignment used when -// packing the type into structs. This will be used for things like -// allocations inside a stack frame, which LLVM has a free hand in. -pub fn llalign_of_pref(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMPreferredAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -// Returns the minimum alignment of a type required by the platform. -// This is the alignment that will be used for struct fields, arrays, -// and similar ABI-mandated things. -pub fn llalign_of_min(cx: &CrateContext, ty: Type) -> llalign { - unsafe { - return llvm::LLVMABIAlignmentOfType(cx.td(), ty.to_ref()); - } -} - -pub fn llelement_offset(cx: &CrateContext, struct_ty: Type, element: usize) -> u64 { - unsafe { - return llvm::LLVMOffsetOfElement(cx.td(), - struct_ty.to_ref(), - element as u32); - } -} diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs index e7c5a36838c..a2e7eb2258f 100644 --- a/src/librustc_trans/meth.rs +++ b/src/librustc_trans/meth.rs @@ -13,11 +13,11 @@ use callee; use common::*; use builder::Builder; use consts; -use machine; use monomorphize; use type_::Type; use value::Value; use rustc::ty::{self, Ty}; +use rustc::ty::layout::HasDataLayout; use debuginfo; #[derive(Copy, Clone, Debug)] @@ -79,10 +79,11 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, // Not in the cache. Build it. 
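The code below assembles the vtable constant; a hypothetical, simplified picture of its header, with the size and alignment slots now sourced from `ccx.size_and_align_of(ty)`:

```rust
// Sketch only: the leading slots of a trait-object vtable.
struct VtableHeader {
    drop_in_place: fn(*mut ()),
    size: usize,  // C_usize(ccx, size.bytes())
    align: usize, // C_usize(ccx, align.abi())
    // ...followed by one pointer per trait method
}

fn noop_drop(_: *mut ()) {}

fn main() {
    // e.g. the header a Vec<u8> trait object would get on x86_64.
    let _ = VtableHeader { drop_in_place: noop_drop, size: 24, align: 8 };
}
```

The constant itself is now emitted at the target's `pointer_align` instead of LLVM's preferred alignment for the struct, which suffices because every slot is pointer-sized.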
let nullptr = C_null(Type::nil(ccx).ptr_to()); + let (size, align) = ccx.size_and_align_of(ty); let mut components: Vec<_> = [ callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.tcx(), ty)), - C_usize(ccx, ccx.size_of(ty)), - C_usize(ccx, ccx.align_of(ty) as u64) + C_usize(ccx, size.bytes()), + C_usize(ccx, align.abi()) ].iter().cloned().collect(); if let Some(trait_ref) = trait_ref { @@ -97,7 +98,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, } let vtable_const = C_struct(ccx, &components, false); - let align = machine::llalign_of_pref(ccx, val_ty(vtable_const)); + let align = ccx.data_layout().pointer_align; let vtable = consts::addr_of(ccx, vtable_const, align, "vtable"); debuginfo::create_vtable_metadata(ccx, ty, vtable); diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs index bd26c961bb2..abd86a5cb01 100644 --- a/src/librustc_trans/mir/block.rs +++ b/src/librustc_trans/mir/block.rs @@ -17,12 +17,11 @@ use rustc::traits; use rustc::mir; use abi::{Abi, FnType, ArgType}; use adt; -use base::{self, Lifetime}; +use base; use callee; use builder::Builder; use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_undef}; use consts; -use machine::llalign_of_min; use meth; use monomorphize; use type_of; @@ -31,8 +30,6 @@ use type_::Type; use syntax::symbol::Symbol; use syntax_pos::Pos; -use std::cmp; - use super::{MirContext, LocalRef}; use super::constant::Const; use super::lvalue::{Alignment, LvalueRef}; @@ -120,7 +117,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn_ty: FnType<'tcx>, fn_ptr: ValueRef, llargs: &[ValueRef], - destination: Option<(ReturnDest, Ty<'tcx>, mir::BasicBlock)>, + destination: Option<(ReturnDest<'tcx>, Ty<'tcx>, mir::BasicBlock)>, cleanup: Option<mir::BasicBlock> | { if let Some(cleanup) = cleanup { @@ -175,14 +172,23 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let Some(cleanup_pad) = cleanup_pad { bcx.cleanup_ret(cleanup_pad, None); } else { - let ps = self.get_personality_slot(&bcx); - let lp = bcx.load(ps, None); - Lifetime::End.call(&bcx, ps); + let slot = self.get_personality_slot(&bcx); + + let (lp0ptr, align) = slot.trans_field_ptr(&bcx, 0); + let lp0 = bcx.load(lp0ptr, align.to_align()); + + let (lp1ptr, align) = slot.trans_field_ptr(&bcx, 1); + let lp1 = bcx.load(lp1ptr, align.to_align()); + + slot.storage_dead(&bcx); + if !bcx.sess().target.target.options.custom_unwind_resume { + let mut lp = C_undef(self.landing_pad_type()); + lp = bcx.insert_value(lp, lp0, 0); + lp = bcx.insert_value(lp, lp1, 1); bcx.resume(lp); } else { - let exc_ptr = bcx.extract_value(lp, 0); - bcx.call(bcx.ccx.eh_unwind_resume(), &[exc_ptr], cleanup_bundle); + bcx.call(bcx.ccx.eh_unwind_resume(), &[lp0], cleanup_bundle); bcx.unreachable(); } } @@ -245,8 +251,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } }; let load = bcx.load( - bcx.pointercast(llslot, cast_ty.ptr_to()), - Some(ret.layout.align(bcx.ccx).abi() as u32)); + bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()), + Some(ret.layout.align(bcx.ccx))); load } else { let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER)); @@ -336,6 +342,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let filename = C_str_slice(bcx.ccx, filename); let line = C_u32(bcx.ccx, loc.line as u32); let col = C_u32(bcx.ccx, loc.col.to_usize() as u32 + 1); + let align = tcx.data_layout.aggregate_align + .max(tcx.data_layout.i32_align) + .max(tcx.data_layout.pointer_align); // Put together the arguments to the panic entry point. 
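Note on the `align` computed above: the `(file, line, col)` panic-argument constant previously took its alignment from `llalign_of_min` on the LLVM struct; it is now derived once from the data layout as the maximum of the members' alignments. A small sketch with illustrative byte values:

```rust
// Sketch: alignment for the panic argument struct, standing in for
// tcx.data_layout.aggregate_align.max(i32_align).max(pointer_align).
fn panic_struct_align(aggregate: u64, i32_align: u64, pointer: u64) -> u64 {
    aggregate.max(i32_align).max(pointer)
}

fn main() {
    // Hypothetical target with 4-byte i32 and 8-byte pointers.
    assert_eq!(panic_struct_align(1, 4, 8), 8);
}
```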
let (lang_item, args, const_err) = match *msg { @@ -351,7 +360,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { })); let file_line_col = C_struct(bcx.ccx, &[filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(file_line_col)); let file_line_col = consts::addr_of(bcx.ccx, file_line_col, align, @@ -366,7 +374,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let msg_file_line_col = C_struct(bcx.ccx, &[msg_str, filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col)); let msg_file_line_col = consts::addr_of(bcx.ccx, msg_file_line_col, align, @@ -387,7 +394,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let msg_file_line_col = C_struct(bcx.ccx, &[msg_str, filename, line, col], false); - let align = llalign_of_min(bcx.ccx, common::val_ty(msg_file_line_col)); let msg_file_line_col = consts::addr_of(bcx.ccx, msg_file_line_col, align, @@ -552,7 +558,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { ReturnDest::Nothing => { (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..]) } - ReturnDest::IndirectOperand(dst, _) | + ReturnDest::IndirectOperand(dst, _) => (dst.llval, &llargs[..]), ReturnDest::Store(dst) => (dst, &llargs[..]), ReturnDest::DirectOperand(_) => bug!("Cannot use direct operand with an intrinsic call") @@ -566,7 +572,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { if let ReturnDest::IndirectOperand(dst, _) = ret_dest { // Make a fake operand for store_return let op = OperandRef { - val: Ref(dst, Alignment::AbiAligned), + val: Ref(dst.llval, Alignment::AbiAligned), ty: sig.output(), }; self.store_return(&bcx, ret_dest, &fn_ty.ret, op); @@ -633,7 +639,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(C_undef(ty)); + llargs.push(C_undef(ty.llvm_type(bcx.ccx))); } if arg.is_ignore() { @@ -651,13 +657,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { (op.pack_if_pair(bcx).immediate(), Alignment::AbiAligned, false) } } - Ref(llval, Alignment::Packed) if arg.is_indirect() => { + Ref(llval, align @ Alignment::Packed) if arg.is_indirect() => { // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. 
let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg", None); - base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1)); + base::memcpy_ty(bcx, llscratch, llval, op.ty, align.to_align()); (llscratch, Alignment::AbiAligned, true) } Ref(llval, align) => (llval, align, true) @@ -670,8 +676,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None); llval = bcx.trunc(llval, Type::i1(bcx.ccx)); } else if let Some(ty) = arg.cast { - llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()), - align.min_with(arg.layout.align(bcx.ccx).abi() as u32)); + llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()), + align.min_with(Some(arg.layout.align(bcx.ccx)))); } else { llval = bcx.load(llval, align.to_align()); } @@ -759,14 +765,17 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } - fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> ValueRef { + fn get_personality_slot(&mut self, bcx: &Builder<'a, 'tcx>) -> LvalueRef<'tcx> { let ccx = bcx.ccx; - if let Some(slot) = self.llpersonalityslot { + if let Some(slot) = self.personality_slot { slot } else { - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let slot = bcx.alloca(llretty, "personalityslot", None); - self.llpersonalityslot = Some(slot); + let ty = ccx.tcx().intern_tup(&[ + ccx.tcx().mk_mut_ptr(ccx.tcx().types.u8), + ccx.tcx().types.i32 + ], false); + let slot = LvalueRef::alloca(bcx, ty, "personalityslot"); + self.personality_slot = Some(slot); slot } } @@ -794,16 +803,26 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ccx = bcx.ccx; let llpersonality = self.ccx.eh_personality(); - let llretty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false); - let llretval = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); - bcx.set_cleanup(llretval); + let llretty = self.landing_pad_type(); + let lp = bcx.landing_pad(llretty, llpersonality, 1, self.llfn); + bcx.set_cleanup(lp); + let slot = self.get_personality_slot(&bcx); - Lifetime::Start.call(&bcx, slot); - bcx.store(llretval, slot, None); + slot.storage_live(&bcx); + self.store_operand(&bcx, slot.llval, None, OperandRef { + val: Pair(bcx.extract_value(lp, 0), bcx.extract_value(lp, 1)), + ty: slot.ty.to_ty(ccx.tcx()) + }); + bcx.br(target_bb); bcx.llbb() } + fn landing_pad_type(&self) -> Type { + let ccx = self.ccx; + Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)], false) + } + fn unreachable_block(&mut self) -> BasicBlockRef { self.unreachable_block.unwrap_or_else(|| { let bl = self.new_block("unreachable"); @@ -825,7 +844,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { fn make_return_dest(&mut self, bcx: &Builder<'a, 'tcx>, dest: &mir::Lvalue<'tcx>, fn_ret_ty: &ArgType, - llargs: &mut Vec<ValueRef>, is_intrinsic: bool) -> ReturnDest { + llargs: &mut Vec<ValueRef>, is_intrinsic: bool) + -> ReturnDest<'tcx> { // If the return is ignored, we can just return a do-nothing ReturnDest if fn_ret_ty.is_ignore() { return ReturnDest::Nothing; @@ -841,14 +861,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret"); + tmp.storage_live(bcx); llargs.push(tmp.llval); - ReturnDest::IndirectOperand(tmp.llval, index) + ReturnDest::IndirectOperand(tmp, index) } else if is_intrinsic { // Currently, intrinsics always need a location to store // the result. 
So we create a temporary alloca for the
// result.
let tmp = LvalueRef::alloca(bcx, ret_ty, "tmp_ret");
- ReturnDest::IndirectOperand(tmp.llval, index)
+ tmp.storage_live(bcx);
+ ReturnDest::IndirectOperand(tmp, index)
} else {
ReturnDest::DirectOperand(index)
};
@@ -891,8 +913,10 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let lvalue_ty = self.monomorphized_lvalue_ty(dst);
assert!(!lvalue_ty.has_erasable_regions());
let lvalue = LvalueRef::alloca(bcx, lvalue_ty, "transmute_temp");
+ lvalue.storage_live(bcx);
self.trans_transmute_into(bcx, src, &lvalue);
let op = self.trans_load(bcx, lvalue.llval, lvalue.alignment, lvalue_ty);
+ lvalue.storage_dead(bcx);
self.locals[index] = LocalRef::Operand(Some(op));
}
LocalRef::Operand(Some(_)) => {
@@ -915,15 +939,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
let in_type = val.ty;
let out_type = dst.ty.to_ty(bcx.tcx());
- let llalign = cmp::min(bcx.ccx.align_of(in_type), bcx.ccx.align_of(out_type));
- self.store_operand(bcx, cast_ptr, Some(llalign), val);
+ let align = bcx.ccx.align_of(in_type).min(bcx.ccx.align_of(out_type));
+ self.store_operand(bcx, cast_ptr, Some(align), val);
}
// Stores the return value of a function call into its final location.
fn store_return(&mut self,
bcx: &Builder<'a, 'tcx>,
- dest: ReturnDest,
+ dest: ReturnDest<'tcx>,
ret_ty: &ArgType<'tcx>,
op: OperandRef<'tcx>) {
use self::ReturnDest::*;
@@ -932,15 +956,19 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
Nothing => (),
Store(dst) => ret_ty.store(bcx, op.immediate(), dst),
IndirectOperand(tmp, index) => {
- let op = self.trans_load(bcx, tmp, Alignment::AbiAligned, op.ty);
+ let op = self.trans_load(bcx, tmp.llval, Alignment::AbiAligned, op.ty);
+ tmp.storage_dead(bcx);
self.locals[index] = LocalRef::Operand(Some(op));
}
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
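The `min` of the two ABI alignments in `trans_transmute_into` above is what keeps the store sound: the same bytes are viewed as two different types, so a store through the cast pointer may only assume the alignment both types guarantee. A standalone illustration, using `std::mem::align_of` as a stand-in for `ccx.align_of` and plain `usize` for the new `Align` type:

```rust
use std::mem::align_of;

// The store in a transmute may only assume the alignment guaranteed by
// *both* the source and destination types.
fn transmute_store_align<Src, Dst>() -> usize {
    align_of::<Src>().min(align_of::<Dst>())
}

fn main() {
    // Transmuting u64 -> [u8; 8]: the byte array only guarantees 1-byte
    // alignment, so the store must not assume 8.
    assert_eq!(transmute_store_align::<u64, [u8; 8]>(), 1);
}
```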
let op = if ret_ty.cast.is_some() { let tmp = LvalueRef::alloca(bcx, op.ty, "tmp_ret"); + tmp.storage_live(bcx); ret_ty.store(bcx, op.immediate(), tmp.llval); - self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty) + let op = self.trans_load(bcx, tmp.llval, tmp.alignment, op.ty); + tmp.storage_dead(bcx); + op } else { op.unpack_if_pair(bcx) }; @@ -950,13 +978,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } -enum ReturnDest { +enum ReturnDest<'tcx> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer Store(ValueRef), // Stores an indirect return value to an operand local lvalue - IndirectOperand(ValueRef, mir::Local), + IndirectOperand(LvalueRef<'tcx>, mir::Local), // Stores a direct return value to an operand local lvalue DirectOperand(mir::Local) } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 67fdc1e640a..57c131a106b 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -18,12 +18,12 @@ use rustc::traits; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc::ty::{self, Ty, TyCtxt, TypeFoldable}; -use rustc::ty::layout::{self, LayoutTyper}; +use rustc::ty::layout::{self, LayoutTyper, Size}; use rustc::ty::cast::{CastTy, IntTy}; use rustc::ty::subst::{Kind, Substs, Subst}; use rustc_apfloat::{ieee, Float, Status}; use rustc_data_structures::indexed_vec::{Idx, IndexVec}; -use {adt, base, machine}; +use {adt, base}; use abi::{self, Abi}; use callee; use builder::Builder; @@ -100,9 +100,11 @@ impl<'tcx> Const<'tcx> { ConstVal::Bool(v) => C_bool(ccx, v), ConstVal::Integral(ref i) => return Const::from_constint(ccx, i), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), - ConstVal::ByteStr(v) => consts::addr_of(ccx, C_bytes(ccx, v.data), 1, "byte_str"), + ConstVal::ByteStr(v) => { + consts::addr_of(ccx, C_bytes(ccx, v.data), ccx.align_of(ty), "byte_str") + } ConstVal::Char(c) => C_uint(Type::char(ccx), c as u64), - ConstVal::Function(..) => C_null(type_of::type_of(ccx, ty)), + ConstVal::Function(..) => C_null(llty), ConstVal::Variant(_) | ConstVal::Aggregate(..) | ConstVal::Unevaluated(..) => { @@ -368,12 +370,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { match &tcx.item_name(def_id)[..] { "size_of" => { let llval = C_usize(self.ccx, - self.ccx.size_of(substs.type_at(0))); + self.ccx.size_of(substs.type_at(0)).bytes()); Ok(Const::new(llval, tcx.types.usize)) } "min_align_of" => { let llval = C_usize(self.ccx, - self.ccx.align_of(substs.type_at(0)) as u64); + self.ccx.align_of(substs.type_at(0)).abi()); Ok(Const::new(llval, tcx.types.usize)) } _ => span_bug!(span, "{:?} in constant", terminator.kind) @@ -590,7 +592,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { self.const_array(dest_ty, &fields) } - mir::Rvalue::Aggregate(ref kind, ref operands) => { + mir::Rvalue::Aggregate(box mir::AggregateKind::Array(_), ref operands) => { // Make sure to evaluate all operands to // report as many errors as we possibly can. let mut fields = Vec::with_capacity(operands.len()); @@ -603,17 +605,23 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { } failure?; - match **kind { - mir::AggregateKind::Array(_) => { - self.const_array(dest_ty, &fields) - } - mir::AggregateKind::Adt(..) | - mir::AggregateKind::Closure(..) | - mir::AggregateKind::Generator(..) 
| - mir::AggregateKind::Tuple => { - Const::new(trans_const(self.ccx, dest_ty, kind, &fields), dest_ty) + self.const_array(dest_ty, &fields) + } + + mir::Rvalue::Aggregate(ref kind, ref operands) => { + // Make sure to evaluate all operands to + // report as many errors as we possibly can. + let mut fields = Vec::with_capacity(operands.len()); + let mut failure = Ok(()); + for operand in operands { + match self.const_operand(operand, span) { + Ok(val) => fields.push(val), + Err(err) => if failure.is_ok() { failure = Err(err); } } } + failure?; + + trans_const_adt(self.ccx, dest_ty, kind, &fields) } mir::Rvalue::Cast(ref kind, ref source, cast_ty) => { @@ -780,7 +788,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { let align = if self.ccx.shared().type_is_sized(ty) { self.ccx.align_of(ty) } else { - self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign + self.ccx.tcx().data_layout.pointer_align }; if bk == mir::BorrowKind::Mut { consts::addr_of_mut(self.ccx, llval, align, "ref_mut") @@ -860,7 +868,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(self.ccx.shared().type_is_sized(ty)); - let llval = C_usize(self.ccx, self.ccx.size_of(ty)); + let llval = C_usize(self.ccx, self.ccx.size_of(ty).bytes()); Const::new(llval, tcx.types.usize) } @@ -1099,12 +1107,12 @@ pub fn trans_static_initializer<'a, 'tcx>( /// Currently the returned value has the same size as the type, but /// this could be changed in the future to avoid allocating unnecessary /// space after values of shorter-than-maximum cases. -fn trans_const<'a, 'tcx>( +fn trans_const_adt<'a, 'tcx>( ccx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>, kind: &mir::AggregateKind, - vals: &[ValueRef] -) -> ValueRef { + vals: &[Const<'tcx>] +) -> Const<'tcx> { let l = ccx.layout_of(t); let variant_index = match *kind { mir::AggregateKind::Adt(_, index, _, _) => index, @@ -1121,112 +1129,97 @@ fn trans_const<'a, 'tcx>( }; assert_eq!(vals.len(), 0); adt::assert_discr_in_range(min, max, discr); - C_int(Type::from_integer(ccx, d), discr as i64) + Const::new(C_int(Type::from_integer(ccx, d), discr as i64), t) } layout::General { discr: d, ref variants, .. } => { let variant = &variants[variant_index]; let lldiscr = C_int(Type::from_integer(ccx, d), variant_index as i64); - let mut vals_with_discr = vec![lldiscr]; + let mut vals_with_discr = vec![ + Const::new(lldiscr, d.to_ty(ccx.tcx(), false)) + ]; vals_with_discr.extend_from_slice(vals); - let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]); - let needed_padding = l.size(ccx).bytes() - variant.stride().bytes(); - if needed_padding > 0 { - contents.push(padding(ccx, needed_padding)); - } - C_struct(ccx, &contents[..], false) + build_const_struct(ccx, l, &variant, &vals_with_discr) } layout::UntaggedUnion { ref variants, .. }=> { assert_eq!(variant_index, 0); - let contents = build_const_union(ccx, variants, vals[0]); - C_struct(ccx, &contents, variants.packed) + let mut contents = vec![vals[0].llval]; + + let offset = ccx.size_of(vals[0].ty); + let size = variants.stride(); + if offset != size { + contents.push(padding(ccx, size - offset)); + } + + Const::new(C_struct(ccx, &contents, variants.packed), t) } layout::Univariant { ref variant, .. } => { assert_eq!(variant_index, 0); - let contents = build_const_struct(ccx, &variant, vals); - C_struct(ccx, &contents[..], variant.packed) + build_const_struct(ccx, l, &variant, vals) } layout::Vector { .. 
} => {
- C_vector(vals)
+ Const::new(C_vector(&vals.iter().map(|x| x.llval).collect::<Vec<_>>()), t)
}
layout::RawNullablePointer { nndiscr, .. } => {
if variant_index as u64 == nndiscr {
assert_eq!(vals.len(), 1);
- vals[0]
+ Const::new(vals[0].llval, t)
} else {
- C_null(type_of::type_of(ccx, t))
+ Const::new(C_null(type_of::type_of(ccx, t)), t)
}
}
layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
if variant_index as u64 == nndiscr {
- C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false)
+ build_const_struct(ccx, l, &nonnull, vals)
} else {
// Always use null even if it's not the `discrfield`th
// field; see #8506.
- C_null(type_of::type_of(ccx, t))
+ Const::new(C_null(type_of::type_of(ccx, t)), t)
}
}
- _ => bug!("trans_const: cannot handle type {} represented as {:#?}", t, l)
+ _ => bug!("trans_const_adt: cannot handle type {} represented as {:#?}", t, l)
}
}
/// Building structs is a little complicated, because we might need to
/// insert padding if a field's value is less aligned than its type.
///
-/// Continuing the example from `trans_const`, a value of type `(u32,
+/// Continuing the example from `trans_const_adt`, a value of type `(u32,
/// E)` should have the `E` at offset 8, but if that field's
/// initializer is 4-byte aligned then simply translating the tuple as
/// a two-element struct will locate it at offset 4, and accesses to it
/// will read the wrong memory.
fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+ layout: layout::TyLayout<'tcx>,
st: &layout::Struct,
- vals: &[ValueRef])
- -> Vec<ValueRef> {
+ vals: &[Const<'tcx>])
+ -> Const<'tcx> {
assert_eq!(vals.len(), st.offsets.len());
- if vals.len() == 0 {
- return Vec::new();
- }
-
// offset of current value
- let mut offset = 0;
+ let mut offset = Size::from_bytes(0);
let mut cfields = Vec::new();
cfields.reserve(st.offsets.len()*2);
let parts = st.field_index_by_increasing_offset().map(|i| {
- (&vals[i], st.offsets[i].bytes())
+ (vals[i], st.offsets[i])
});
- for (&val, target_offset) in parts {
+ for (val, target_offset) in parts {
if offset < target_offset {
cfields.push(padding(ccx, target_offset - offset));
- offset = target_offset;
}
- assert!(!is_undef(val));
- cfields.push(val);
- offset += machine::llsize_of_alloc(ccx, val_ty(val));
- }
-
- if offset < st.stride().bytes() {
- cfields.push(padding(ccx, st.stride().bytes() - offset));
+ assert!(!is_undef(val.llval));
+ cfields.push(val.llval);
+ offset = target_offset + ccx.size_of(val.ty);
}
- cfields
-}
-
-fn build_const_union<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
- un: &layout::Union,
- field_val: ValueRef)
- -> Vec<ValueRef> {
- let mut cfields = vec![field_val];
-
- let offset = machine::llsize_of_alloc(ccx, val_ty(field_val));
- let size = un.stride().bytes();
- if offset != size {
+ let size = layout.size(ccx);
+ if offset < size {
cfields.push(padding(ccx, size - offset));
}
- cfields
+ Const::new(C_struct(ccx, &cfields, st.packed), layout.ty)
}
-fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
- C_undef(Type::array(&Type::i8(ccx), size))
+fn padding(ccx: &CrateContext, size: Size) -> ValueRef {
+ C_undef(Type::array(&Type::i8(ccx), size.bytes()))
}
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index 5faaef6ebff..376d42c71ad 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -10,7 +10,7 @@
use llvm::{self, ValueRef};
use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::layout::{self, LayoutTyper};
+use
rustc::ty::layout::{self, Align, LayoutTyper};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
@@ -19,7 +19,6 @@
use base;
use builder::Builder;
use common::{self, CrateContext, C_usize, C_u8, C_i32, C_int, C_null, val_ty};
use consts;
-use machine;
use type_of;
use type_::Type;
use value::Value;
@@ -56,18 +55,15 @@ impl Alignment {
}
}
- pub fn to_align(self) -> Option<u32> {
+ pub fn to_align(self) -> Option<Align> {
match self {
- Alignment::Packed => Some(1),
+ Alignment::Packed => Some(Align::from_bytes(1, 1).unwrap()),
Alignment::AbiAligned => None,
}
}
- pub fn min_with(self, align: u32) -> Option<u32> {
- match self {
- Alignment::Packed => Some(1),
- Alignment::AbiAligned => Some(align),
- }
+ pub fn min_with(self, align: Option<Align>) -> Option<Align> {
+ self.to_align().or(align)
}
}
@@ -153,7 +149,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// The unit-like case might have a nonzero number of unit-like fields.
// (e.g., Result of Either with () as one side.)
let ty = type_of::type_of(ccx, fty);
- assert_eq!(machine::llsize_of_alloc(ccx, ty), 0);
+ assert_eq!(ccx.size_of(fty).bytes(), 0);
return (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed);
}
layout::RawNullablePointer { .. } => {
@@ -174,7 +170,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
let ptr_val = if let layout::General { discr, .. } = *l {
let variant_ty = Type::struct_(ccx,
&adt::struct_llfields(ccx, l.ty, l.variant_index.unwrap(), st,
- Some(discr.to_ty(&bcx.tcx(), false))), st.packed);
+ Some(discr.to_ty(bcx.tcx(), false))), st.packed);
bcx.pointercast(self.llval, variant_ty.ptr_to())
} else {
self.llval
@@ -374,6 +370,14 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
bcx.inbounds_gep(self.llval, &[zero, llindex])
}
}
+
+ pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
+ bcx.lifetime_start(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx())));
+ }
+
+ pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
+ bcx.lifetime_end(self.llval, bcx.ccx.size_of(self.ty.to_ty(bcx.tcx())));
+ }
}
impl<'a, 'tcx> MirContext<'a, 'tcx> {
@@ -432,7 +436,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
mir::ProjectionElem::Index(index) => {
let index = &mir::Operand::Consume(mir::Lvalue::Local(index));
let index = self.trans_operand(bcx, index);
- let llindex = self.prepare_index(bcx, index.immediate());
+ let llindex = index.immediate();
((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
}
mir::ProjectionElem::ConstantIndex { offset,
@@ -487,22 +491,6 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
result
}
- /// Adjust the bitwidth of an index since LLVM is less forgiving
- /// than we are.
- ///
- /// nmatsakis: is this still necessary? Not sure.
- fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
- let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex));
- let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.isize_ty());
- if index_size < int_size {
- bcx.zext(llindex, bcx.ccx.isize_ty())
- } else if index_size > int_size {
- bcx.trunc(llindex, bcx.ccx.isize_ty())
- } else {
- llindex
- }
- }
-
pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
let tcx = self.ccx.tcx();
let lvalue_ty = lvalue.ty(self.mir, tcx);
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 59da80035fd..1cb13c973f9 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -61,7 +61,7 @@ pub struct MirContext<'a, 'tcx:'a> {
/// don't really care about it very much.
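The reworked `Alignment` helpers in the lvalue.rs hunk collapse the old two-arm `min_with` into `self.to_align().or(align)`. A self-contained model, with plain `u64` byte counts standing in for the new `Align` type, checking that the shorter form preserves the old behaviour:

```rust
#[derive(Clone, Copy)]
enum Alignment {
    Packed,
    AbiAligned,
}

impl Alignment {
    // Packed forces 1-byte alignment; AbiAligned imposes nothing extra.
    fn to_align(self) -> Option<u64> {
        match self {
            Alignment::Packed => Some(1),
            Alignment::AbiAligned => None,
        }
    }

    // New formulation: a packed source always wins, otherwise defer to the
    // caller-supplied alignment (now itself an Option).
    fn min_with(self, align: Option<u64>) -> Option<u64> {
        self.to_align().or(align)
    }
}

fn main() {
    for &align in &[None, Some(4), Some(8)] {
        // Old behaviour: Packed => Some(1), AbiAligned => the given align.
        assert_eq!(Alignment::Packed.min_with(align), Some(1));
        assert_eq!(Alignment::AbiAligned.min_with(align), align);
    }
}
```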
Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - llpersonalityslot: Option<ValueRef>, + personality_slot: Option<LvalueRef<'tcx>>, /// A `Block` for each MIR `BasicBlock` blocks: IndexVec<mir::BasicBlock, BasicBlockRef>, @@ -177,9 +177,8 @@ enum LocalRef<'tcx> { Operand(Option<OperandRef<'tcx>>), } -impl<'tcx> LocalRef<'tcx> { - fn new_operand<'a>(ccx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>) -> LocalRef<'tcx> { +impl<'a, 'tcx> LocalRef<'tcx> { + fn new_operand(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> LocalRef<'tcx> { if common::type_is_zero_size(ccx, ty) { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -232,7 +231,7 @@ pub fn trans_mir<'a, 'tcx: 'a>( llfn, fn_ty, ccx, - llpersonalityslot: None, + personality_slot: None, blocks: block_bcxs, unreachable_block: None, cleanup_kinds, @@ -470,7 +469,7 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, let val = if common::type_is_fat_ptr(bcx.ccx, arg_ty) { let meta = &mircx.fn_ty.args[idx]; idx += 1; - assert_eq!((meta.cast, meta.pad), (None, None)); + assert!(meta.cast.is_none() && meta.pad.is_none()); let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); llarg_idx += 1; diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index 9ce1749190b..47350d07125 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,7 +10,7 @@ use llvm::ValueRef; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{Layout, LayoutTyper}; +use rustc::ty::layout::{Align, Layout, LayoutTyper}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; @@ -310,7 +310,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { pub fn store_operand(&mut self, bcx: &Builder<'a, 'tcx>, lldest: ValueRef, - align: Option<u32>, + align: Option<Align>, operand: OperandRef<'tcx>) { debug!("store_operand: operand={:?}, align={:?}", operand, align); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized @@ -319,10 +319,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { return; } match operand.val { - OperandValue::Ref(r, Alignment::Packed) => - base::memcpy_ty(bcx, lldest, r, operand.ty, Some(1)), - OperandValue::Ref(r, Alignment::AbiAligned) => - base::memcpy_ty(bcx, lldest, r, operand.ty, align), + OperandValue::Ref(r, source_align) => + base::memcpy_ty(bcx, lldest, r, operand.ty, + source_align.min_with(align)), OperandValue::Immediate(s) => { bcx.store(base::from_immediate(bcx, s), lldest, align); } @@ -331,7 +330,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { Layout::Univariant { ref variant, .. 
} => { (adt::struct_llfields_index(variant, 0), adt::struct_llfields_index(variant, 1), - if variant.packed { Some(1) } else { None }) + if variant.packed { Some(variant.align) } else { None }) } _ => (0, 1, align) }; diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index bc263fd60a2..7e4b7235750 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -22,10 +22,10 @@ use std::{u128, i128}; use base; use builder::Builder; use callee; -use common::{self, val_ty, C_bool, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral}; +use common::{self, val_ty}; +use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral}; use consts; use adt; -use machine; use monomorphize; use type_::Type; use type_of; @@ -104,33 +104,31 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } let tr_elem = self.trans_operand(&bcx, elem); - let size = count.as_u64(); - let size = C_usize(bcx.ccx, size); + let count = count.as_u64(); + let count = C_usize(bcx.ccx, count); let base = base::get_dataptr(&bcx, dest.llval); let align = dest.alignment.to_align(); if let OperandValue::Immediate(v) = tr_elem.val { + let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); + let align = C_i32(bcx.ccx, align.abi() as i32); + let size = C_usize(bcx.ccx, bcx.ccx.size_of(dest_ty).bytes()); + // Use llvm.memset.p0i8.* to initialize all zero arrays if common::is_const_integral(v) && common::const_to_uint(v) == 0 { - let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); - let align = C_i32(bcx.ccx, align as i32); - let ty = type_of::type_of(bcx.ccx, dest_ty); - let size = machine::llsize_of(bcx.ccx, ty); - let fill = C_uint(Type::i8(bcx.ccx), 0); + let fill = C_u8(bcx.ccx, 0); base::call_memset(&bcx, base, fill, size, align, false); return bcx; } // Use llvm.memset.p0i8.* to initialize byte arrays if common::val_ty(v) == Type::i8(bcx.ccx) { - let align = align.unwrap_or_else(|| bcx.ccx.align_of(tr_elem.ty)); - let align = C_i32(bcx.ccx, align as i32); base::call_memset(&bcx, base, v, size, align, false); return bcx; } } - tvec::slice_for_each(&bcx, base, tr_elem.ty, size, |bcx, llslot, loop_bb| { + tvec::slice_for_each(&bcx, base, tr_elem.ty, count, |bcx, llslot, loop_bb| { self.store_operand(bcx, llslot, align, tr_elem); bcx.br(loop_bb); }) @@ -459,7 +457,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { assert!(bcx.ccx.shared().type_is_sized(ty)); - let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty)); + let val = C_usize(bcx.ccx, bcx.ccx.size_of(ty).bytes()); let tcx = bcx.tcx(); (bcx, OperandRef { val: OperandValue::Immediate(val), @@ -469,12 +467,11 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let llty = type_of::type_of(bcx.ccx, content_ty); - let llsize = machine::llsize_of(bcx.ccx, llty); - let align = bcx.ccx.align_of(content_ty); - let llalign = C_usize(bcx.ccx, align as u64); - let llty_ptr = llty.ptr_to(); + let (size, align) = bcx.ccx.size_and_align_of(content_ty); + let llsize = C_usize(bcx.ccx, size.bytes()); + let llalign = C_usize(bcx.ccx, align.abi()); let box_ty = bcx.tcx().mk_box(content_ty); + let llty_ptr = type_of::type_of(bcx.ccx, box_ty); // Allocate space: let def_id = match bcx.tcx().lang_items().require(ExchangeMallocFnLangItem) { diff --git a/src/librustc_trans/mir/statement.rs b/src/librustc_trans/mir/statement.rs index 
6e9b1f36c2c..2559b21c46b 100644 --- a/src/librustc_trans/mir/statement.rs +++ b/src/librustc_trans/mir/statement.rs @@ -10,7 +10,6 @@ use rustc::mir; -use base; use asm; use common; use builder::Builder; @@ -63,10 +62,16 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { bcx } mir::StatementKind::StorageLive(local) => { - self.trans_storage_liveness(bcx, local, base::Lifetime::Start) + if let LocalRef::Lvalue(tr_lval) = self.locals[local] { + tr_lval.storage_live(&bcx); + } + bcx } mir::StatementKind::StorageDead(local) => { - self.trans_storage_liveness(bcx, local, base::Lifetime::End) + if let LocalRef::Lvalue(tr_lval) = self.locals[local] { + tr_lval.storage_dead(&bcx); + } + bcx } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { let outputs = outputs.iter().map(|output| { @@ -86,15 +91,4 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::StatementKind::Nop => bcx, } } - - fn trans_storage_liveness(&self, - bcx: Builder<'a, 'tcx>, - index: mir::Local, - intrinsic: base::Lifetime) - -> Builder<'a, 'tcx> { - if let LocalRef::Lvalue(tr_lval) = self.locals[index] { - intrinsic.call(&bcx, tr_lval.llval); - } - bcx - } } diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index cac09a81361..f74aec07087 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -11,9 +11,8 @@ use abi::FnType; use adt; use common::*; -use machine; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::LayoutTyper; +use rustc::ty::layout::{Align, LayoutTyper, Size}; use trans_item::DefPathBasedNames; use type_::Type; @@ -212,19 +211,26 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> } impl<'a, 'tcx> CrateContext<'a, 'tcx> { - pub fn align_of(&self, ty: Ty<'tcx>) -> machine::llalign { - self.layout_of(ty).align(self).abi() as machine::llalign + pub fn align_of(&self, ty: Ty<'tcx>) -> Align { + self.layout_of(ty).align(self) } - pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize { - self.layout_of(ty).size(self).bytes() as machine::llsize + pub fn size_of(&self, ty: Ty<'tcx>) -> Size { + self.layout_of(ty).size(self) } - pub fn over_align_of(&self, t: Ty<'tcx>) - -> Option<machine::llalign> { + pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { + let layout = self.layout_of(ty); + (layout.size(self), layout.align(self)) + } + + /// Returns alignment if it is different than the primitive alignment. 
+ pub fn over_align_of(&self, t: Ty<'tcx>) -> Option<Align> {
let layout = self.layout_of(t);
- if let Some(align) = layout.over_align(&self.tcx().data_layout) {
- Some(align as machine::llalign)
+ let align = layout.align(self);
+ let primitive_align = layout.primitive_align(self);
+ if align != primitive_align {
+ Some(align)
} else {
None
}
diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp
index 20ea8d70302..c8d974febf2 100644
--- a/src/rustllvm/RustWrapper.cpp
+++ b/src/rustllvm/RustWrapper.cpp
@@ -257,21 +257,18 @@ extern "C" void LLVMRustSetHasUnsafeAlgebra(LLVMValueRef V) {
extern "C" LLVMValueRef
LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
- LLVMAtomicOrdering Order, unsigned Alignment) {
+ LLVMAtomicOrdering Order) {
LoadInst *LI = new LoadInst(unwrap(Source), 0);
LI->setAtomic(fromRust(Order));
- LI->setAlignment(Alignment);
return wrap(unwrap(B)->Insert(LI, Name));
}
extern "C" LLVMValueRef LLVMRustBuildAtomicStore(LLVMBuilderRef B,
LLVMValueRef V,
LLVMValueRef Target,
- LLVMAtomicOrdering Order,
- unsigned Alignment) {
+ LLVMAtomicOrdering Order) {
StoreInst *SI = new StoreInst(unwrap(V), unwrap(Target));
SI->setAtomic(fromRust(Order));
- SI->setAlignment(Alignment);
return wrap(unwrap(B)->Insert(SI));
}
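The reworked `over_align_of` reports an alignment only when it differs from the type's primitive alignment, i.e. when an attribute has raised it. A sketch of that predicate against std's layout queries (`usize` stands in for `Align`, and `Overaligned` is a made-up example type):

```rust
use std::mem::align_of;

// Report the alignment only when something has raised it above the natural
// (primitive) alignment, mirroring the reworked over_align_of above.
fn over_align(align: usize, primitive_align: usize) -> Option<usize> {
    if align != primitive_align {
        Some(align)
    } else {
        None
    }
}

#[repr(align(16))]
struct Overaligned(u64);

fn main() {
    // The attribute raises the alignment of the wrapper above u64's own.
    assert_eq!(over_align(align_of::<Overaligned>(), align_of::<u64>()), Some(16));
    // A plain u64 has no over-alignment to report.
    assert_eq!(over_align(align_of::<u64>(), align_of::<u64>()), None);
}
```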

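The `RustWrapper.cpp` hunk drops the explicit `Alignment` argument from the atomic load/store builders: LLVM requires atomic operations to be naturally aligned, so the alignment is implied by the operand type rather than threaded through as an integer. Safe Rust exhibits the same invariant:

```rust
use std::mem::{align_of, size_of};
use std::sync::atomic::{AtomicU32, AtomicUsize};

fn main() {
    // Atomic types are always naturally aligned (alignment == size), which
    // is why the builders above no longer need an alignment parameter.
    assert_eq!(align_of::<AtomicU32>(), size_of::<AtomicU32>());
    assert_eq!(align_of::<AtomicUsize>(), size_of::<AtomicUsize>());
}
```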