| author | Eduard-Mihai Burtescu <edy.burt@gmail.com> | 2017-09-19 23:43:55 +0300 |
|---|---|---|
| committer | Eduard-Mihai Burtescu <edy.burt@gmail.com> | 2017-11-19 02:14:31 +0200 |
| commit | d0ab6e8644ded75c9a43b46151568f6b782bec59 (patch) | |
| tree | cd53da56721e9ae60bcea4c966e4d1cd35e50564 | |
| parent | fad99542c8643984b7630d8e297007aef824b268 (diff) | |
| download | rust-d0ab6e8644ded75c9a43b46151568f6b782bec59.tar.gz rust-d0ab6e8644ded75c9a43b46151568f6b782bec59.zip | |
rustc_trans: compute LLVM types from type layouts, not Rust types.
| -rw-r--r-- | src/librustc/ty/layout.rs | 8 |
| -rw-r--r-- | src/librustc_trans/adt.rs | 196 |
| -rw-r--r-- | src/librustc_trans/lib.rs | 1 |
| -rw-r--r-- | src/librustc_trans/meth.rs | 2 |
| -rw-r--r-- | src/librustc_trans/mir/lvalue.rs | 65 |
| -rw-r--r-- | src/librustc_trans/type_.rs | 7 |
| -rw-r--r-- | src/librustc_trans/type_of.rs | 285 |
7 files changed, 184 insertions, 380 deletions
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 4d74e5eed59..d905592347f 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -1541,10 +1541,10 @@ impl<'a, 'tcx> Layout {
                         discr_range: (min as u64)..=(max as u64),
                         variants
                     },
-                    fields: FieldPlacement::Arbitrary {
-                        offsets: vec![Size::from_bytes(0)],
-                        memory_index: vec![0]
-                    },
+                    // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results
+                    // in lost optimizations, specifically around allocations, see
+                    // `test/codegen/{alloc-optimisation,vec-optimizes-away}.rs`.
+                    fields: FieldPlacement::Union(1),
                     abi: if discr.size(dl) == size {
                         Abi::Scalar(discr)
                     } else {
diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs
deleted file mode 100644
index 07c64c35c07..00000000000
--- a/src/librustc_trans/adt.rs
+++ /dev/null
@@ -1,196 +0,0 @@
-// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-//! # Representation of Algebraic Data Types
-//!
-//! This module determines how to represent enums, structs, and tuples
-//! based on their monomorphized types; it is responsible both for
-//! choosing a representation and translating basic operations on
-//! values of those types. (Note: exporting the representations for
-//! debuggers is handled in debuginfo.rs, not here.)
-//!
-//! Note that the interface treats everything as a general case of an
-//! enum, so structs/tuples/etc. have one pseudo-variant with
-//! discriminant 0; i.e., as if they were a univariant enum.
-//!
-//! Having everything in one place will enable improvements to data
-//! structure representation; possibilities include:
-//!
-//! - User-specified alignment (e.g., cacheline-aligning parts of
-//!   concurrently accessed data structures); LLVM can't represent this
-//!   directly, so we'd have to insert padding fields in any structure
-//!   that might contain one and adjust GEP indices accordingly. See
-//!   issue #4578.
-//!
-//! - Store nested enums' discriminants in the same word. Rather, if
-//!   some variants start with enums, and those enums representations
-//!   have unused alignment padding between discriminant and body, the
-//!   outer enum's discriminant can be stored there and those variants
-//!   can start at offset 0. Kind of fancy, and might need work to
-//!   make copies of the inner enum type cooperate, but it could help
-//!   with `Option` or `Result` wrapped around another enum.
-//!
-//! - Tagged pointers would be neat, but given that any type can be
-//!   used unboxed and any field can have pointers (including mutable)
-//!   taken to it, implementing them for Rust seems difficult.
-
-use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, FullLayout};
-
-use context::CrateContext;
-use type_::Type;
-
-/// LLVM-level types are a little complicated.
-///
-/// C-like enums need to be actual ints, not wrapped in a struct,
-/// because that changes the ABI on some platforms (see issue #10308).
-///
-/// For nominal types, in some cases, we need to use LLVM named structs
-/// and fill in the actual contents in a second pass to prevent
-/// unbounded recursion; see also the comments in `trans::type_of`.
-pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
-    generic_type_of(cx, t, None)
-}
-
-pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                    t: Ty<'tcx>, name: &str) -> Type {
-    generic_type_of(cx, t, Some(name))
-}
-
-pub fn finish_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                t: Ty<'tcx>, llty: &mut Type) {
-    let l = cx.layout_of(t);
-    debug!("finish_type_of: {} with layout {:#?}", t, l);
-    if let layout::Abi::Scalar(_) = l.abi {
-        return;
-    }
-    match *l.layout {
-        layout::Univariant => {
-            let is_enum = if let ty::TyAdt(def, _) = t.sty {
-                def.is_enum()
-            } else {
-                false
-            };
-            let variant_layout = if is_enum {
-                l.for_variant(0)
-            } else {
-                l
-            };
-            llty.set_struct_body(&struct_llfields(cx, variant_layout),
-                                 variant_layout.is_packed())
-        }
-
-        _ => {}
-    }
-}
-
-fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                             t: Ty<'tcx>,
-                             name: Option<&str>) -> Type {
-    let l = cx.layout_of(t);
-    debug!("adt::generic_type_of {:#?} name: {:?}", l, name);
-    if let layout::Abi::Scalar(value) = l.abi {
-        return cx.llvm_type_of(value.to_ty(cx.tcx()));
-    }
-    match *l.layout {
-        layout::Univariant => {
-            match name {
-                None => {
-                    Type::struct_(cx, &struct_llfields(cx, l), l.is_packed())
-                }
-                Some(name) => {
-                    Type::named_struct(cx, name)
-                }
-            }
-        }
-        _ => {
-            let align = l.align(cx);
-            let abi_align = align.abi();
-            let elem_ty = if let Some(ity) = layout::Integer::for_abi_align(cx, align) {
-                Type::from_integer(cx, ity)
-            } else {
-                let vec_align = cx.data_layout().vector_align(Size::from_bytes(abi_align));
-                assert_eq!(vec_align.abi(), abi_align);
-                Type::vector(&Type::i32(cx), abi_align / 4)
-            };
-
-            let size = l.size(cx).bytes();
-            assert_eq!(size % abi_align, 0);
-            let fill = Type::array(&elem_ty, size / abi_align);
-            match name {
-                None => {
-                    Type::struct_(cx, &[fill], l.is_packed())
-                }
-                Some(name) => {
-                    let mut llty = Type::named_struct(cx, name);
-                    llty.set_struct_body(&[fill], l.is_packed());
-                    llty
-                }
-            }
-        }
-    }
-}
-
-/// Double an index and add 1 to account for padding.
-pub fn memory_index_to_gep(index: u64) -> u64 {
-    1 + index * 2
-}
-
-pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                 layout: FullLayout<'tcx>) -> Vec<Type> {
-    debug!("struct_llfields: {:#?}", layout);
-    let align = layout.align(cx);
-    let size = layout.size(cx);
-    let field_count = layout.fields.count();
-
-    let mut offset = Size::from_bytes(0);
-    let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
-    for i in layout.fields.index_by_increasing_offset() {
-        let field = layout.field(cx, i);
-        let target_offset = layout.fields.offset(i as usize);
-        debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}",
-               i, field, offset, target_offset);
-        assert!(target_offset >= offset);
-        let padding = target_offset - offset;
-        result.push(Type::array(&Type::i8(cx), padding.bytes()));
-        debug!("    padding before: {:?}", padding);
-
-        let llty = cx.llvm_type_of(field.ty);
-        result.push(llty);
-
-        if layout.is_packed() {
-            assert_eq!(padding.bytes(), 0);
-        } else {
-            let field_align = field.align(cx);
-            assert!(field_align.abi() <= align.abi(),
-                    "non-packed type has field with larger align ({}): {:#?}",
-                    field_align.abi(), layout);
-        }
-
-        offset = target_offset + field.size(cx);
-    }
-    if !layout.is_unsized() && field_count > 0 {
-        if offset > size {
-            bug!("layout: {:#?} stride: {:?} offset: {:?}",
-                 layout, size, offset);
-        }
-        let padding = size - offset;
-        debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
-               padding, offset, size);
-        result.push(Type::array(&Type::i8(cx), padding.bytes()));
-        assert!(result.len() == 1 + field_count * 2);
-    } else {
-        debug!("struct_llfields: offset: {:?} stride: {:?}",
-               offset, size);
-    }
-
-    result
-}
-
diff --git a/src/librustc_trans/lib.rs b/src/librustc_trans/lib.rs
index dd33012e900..83fc1017316 100644
--- a/src/librustc_trans/lib.rs
+++ b/src/librustc_trans/lib.rs
@@ -104,7 +104,6 @@ pub mod back {
 }
 
 mod abi;
-mod adt;
 mod allocator;
 mod asm;
 mod assert_module_sources;
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index 2289adb01ea..8dbef1f8d08 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -77,7 +77,7 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     }
 
     // Not in the cache. Build it.
-    let nullptr = C_null(Type::nil(ccx).ptr_to());
+    let nullptr = C_null(Type::i8p(ccx));
 
     let (size, align) = ccx.size_and_align_of(ty);
     let mut components: Vec<_> = [
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index b21e4ffc2c3..b72ccf6ba28 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -10,17 +10,16 @@
 
 use llvm::{self, ValueRef};
 use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::layout::{self, Align, FullLayout, Layout, LayoutOf};
+use rustc::ty::layout::{self, Align, FullLayout, LayoutOf};
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;
 use abi;
-use adt;
 use base;
 use builder::Builder;
 use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty};
 use consts;
-use type_of::LayoutLlvmExt;
+use type_of::{self, LayoutLlvmExt};
 use type_::Type;
 use value::Value;
 use glue;
@@ -206,52 +205,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
         let alignment = self.alignment | Alignment::from(l);
 
         // Unions and newtypes only use an offset of 0.
-        match *l.layout {
-            // FIXME(eddyb) The fields of a fat pointer aren't correct, especially
-            // to unsized structs, we can't represent their pointee types in `Ty`.
-            Layout::FatPointer { .. } => {}
-
-            _ if offset == 0 => {
-                let ty = ccx.llvm_type_of(field.ty);
-                return LvalueRef {
-                    llval: bcx.pointercast(self.llval, ty.ptr_to()),
-                    llextra: if field.is_unsized() {
-                        self.llextra
-                    } else {
-                        ptr::null_mut()
-                    },
-                    ty: LvalueTy::from_ty(field.ty),
-                    alignment,
-                };
-            }
-
-            _ => {}
-        }
-
-        // Discriminant field of enums.
-        if let layout::NullablePointer { .. } = *l.layout {
-            let ty = ccx.llvm_type_of(field.ty);
-            let size = field.size(ccx).bytes();
-
-            // If the discriminant is not on a multiple of the primitive's size,
-            // we need to go through i8*. Also assume the worst alignment.
-            if offset % size != 0 {
-                let byte_ptr = bcx.pointercast(self.llval, Type::i8p(ccx));
-                let byte_ptr = bcx.inbounds_gep(byte_ptr, &[C_usize(ccx, offset)]);
-                let byte_align = Alignment::Packed(Align::from_bytes(1, 1).unwrap());
-                return LvalueRef::new_sized(
-                    bcx.pointercast(byte_ptr, ty.ptr_to()), field.ty, byte_align);
+        let has_llvm_fields = match *l.fields {
+            layout::FieldPlacement::Union(_) => false,
+            layout::FieldPlacement::Array { .. } => true,
+            layout::FieldPlacement::Arbitrary { .. } => {
+                match l.abi {
+                    layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false,
+                    layout::Abi::Aggregate { .. } => true
+                }
             }
-
-            let discr_ptr = bcx.pointercast(self.llval, ty.ptr_to());
-            return LvalueRef::new_sized(
-                bcx.inbounds_gep(discr_ptr, &[C_usize(ccx, offset / size)]),
-                field.ty, alignment);
-        }
+        };
 
         let simple = || {
             LvalueRef {
-                llval: bcx.struct_gep(self.llval, l.llvm_field_index(ix)),
+                llval: if has_llvm_fields {
+                    bcx.struct_gep(self.llval, l.llvm_field_index(ix))
+                } else {
+                    assert_eq!(offset, 0);
+                    let ty = ccx.llvm_type_of(field.ty);
+                    bcx.pointercast(self.llval, ty.ptr_to())
+                },
                 llextra: if ccx.shared().type_has_metadata(field.ty) {
                     self.llextra
                 } else {
@@ -460,7 +433,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
             layout::General { .. } => {
                 let variant_layout = layout.for_variant(variant_index);
                 let variant_ty = Type::struct_(bcx.ccx,
-                    &adt::struct_llfields(bcx.ccx, variant_layout),
+                    &type_of::struct_llfields(bcx.ccx, variant_layout),
                     variant_layout.is_packed());
                 downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to());
             }
diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs
index bb8f3f23108..dbdc8919da9 100644
--- a/src/librustc_trans/type_.rs
+++ b/src/librustc_trans/type_.rs
@@ -66,10 +66,6 @@ impl Type {
         ty!(llvm::LLVMVoidTypeInContext(ccx.llcx()))
     }
 
-    pub fn nil(ccx: &CrateContext) -> Type {
-        Type::empty_struct(ccx)
-    }
-
     pub fn metadata(ccx: &CrateContext) -> Type {
         ty!(llvm::LLVMRustMetadataTypeInContext(ccx.llcx()))
     }
@@ -202,9 +198,6 @@ impl Type {
         ty!(llvm::LLVMStructCreateNamed(ccx.llcx(), name.as_ptr()))
     }
 
-    pub fn empty_struct(ccx: &CrateContext) -> Type {
-        Type::struct_(ccx, &[], false)
-    }
 
     pub fn array(ty: &Type, len: u64) -> Type {
         ty!(llvm::LLVMRustArrayType(ty.to_ref(), len))
diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs
index bd37bfb01d7..7474e71a715 100644
--- a/src/librustc_trans/type_of.rs
+++ b/src/librustc_trans/type_of.rs
@@ -9,10 +9,9 @@
 // except according to those terms.
 
 use abi::FnType;
-use adt;
 use common::*;
 use rustc::ty::{self, Ty, TypeFoldable};
-use rustc::ty::layout::{self, Align, Layout, LayoutOf, Size, FullLayout};
+use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, FullLayout};
 use trans_item::DefPathBasedNames;
 use type_::Type;
 
@@ -43,30 +42,10 @@ pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ->
     }
 }
 
-fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
-    // Check the cache.
-    if let Some(&llty) = cx.lltypes().borrow().get(&t) {
-        return llty;
-    }
-
-    debug!("type_of {:?}", t);
-
-    assert!(!t.has_escaping_regions(), "{:?} has escaping regions", t);
-
-    // Replace any typedef'd types with their equivalent non-typedef
-    // type. This ensures that all LLVM nominal types that contain
-    // Rust types are defined as the same LLVM types. If we don't do
-    // this then, e.g. `Option<{myfield: bool}>` would be a different
-    // type than `Option<myrec>`.
-    let t_norm = cx.tcx().erase_regions(&t);
-
-    if t != t_norm {
-        let llty = cx.llvm_type_of(t_norm);
-        debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty);
-        cx.lltypes().borrow_mut().insert(t, llty);
-        return llty;
-    }
-
+fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
+                                ty: Ty<'tcx>,
+                                defer: &mut Option<(Type, FullLayout<'tcx>)>)
+                                -> Type {
     let ptr_ty = |ty: Ty<'tcx>| {
         if cx.shared().type_has_metadata(ty) {
             if let ty::TyStr = ty.sty {
@@ -88,97 +67,130 @@ fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type
             cx.llvm_type_of(ty).ptr_to()
         }
     };
+    match ty.sty {
+        ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
+            return ptr_ty(ty);
+        }
+        ty::TyAdt(def, _) if def.is_box() => {
+            return ptr_ty(ty.boxed_ty());
+        }
+        ty::TyFnPtr(sig) => {
+            let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig);
+            return FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to();
+        }
+        _ => {}
+    }
+
+    let layout = cx.layout_of(ty);
+    if let layout::Abi::Scalar(value) = layout.abi {
+        let llty = match value {
+            layout::Int(layout::I1, _) => Type::i8(cx),
+            layout::Int(i, _) => Type::from_integer(cx, i),
+            layout::F32 => Type::f32(cx),
+            layout::F64 => Type::f64(cx),
+            layout::Pointer => cx.llvm_type_of(layout::Pointer.to_ty(cx.tcx()))
+        };
+        return llty;
+    }
+
+    if let layout::Abi::Vector { .. } = layout.abi {
+        return Type::vector(&cx.llvm_type_of(layout.field(cx, 0).ty),
+                            layout.fields.count() as u64);
+    }
 
-    let mut llty = match t.sty {
-        ty::TyBool => Type::bool(cx),
-        ty::TyChar => Type::char(cx),
-        ty::TyInt(t) => Type::int_from_ty(cx, t),
-        ty::TyUint(t) => Type::uint_from_ty(cx, t),
-        ty::TyFloat(t) => Type::float_from_ty(cx, t),
-        ty::TyNever => Type::nil(cx),
-        ty::TyClosure(..) => {
-            // Only create the named struct, but don't fill it in. We
-            // fill it in *after* placing it into the type cache.
-            adt::incomplete_type_of(cx, t, "closure")
-        }
-        ty::TyGenerator(..) => {
-            // Only create the named struct, but don't fill it in. We
-            // fill it in *after* placing it into the type cache.
- adt::incomplete_type_of(cx, t, "generator") - } - - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - ptr_ty(ty) - } - ty::TyAdt(def, _) if def.is_box() => { - ptr_ty(t.boxed_ty()) - } - - ty::TyArray(ty, size) => { - let llty = cx.llvm_type_of(ty); - let size = size.val.to_const_int().unwrap().to_u64().unwrap(); - Type::array(&llty, size) - } - - ty::TySlice(ty) => { - Type::array(&cx.llvm_type_of(ty), 0) - } - ty::TyStr => { - Type::array(&Type::i8(cx), 0) - } - ty::TyDynamic(..) | - ty::TyForeign(..) => adt::type_of(cx, t), - - ty::TyFnDef(..) => Type::nil(cx), - ty::TyFnPtr(sig) => { - let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig); - FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() - } - ty::TyTuple(ref tys, _) if tys.is_empty() => Type::nil(cx), - ty::TyTuple(..) => { - adt::type_of(cx, t) - } - ty::TyAdt(..) if t.is_simd() => { - let e = t.simd_type(cx.tcx()); - if !e.is_machine() { - cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \ - a non-machine element type `{}`", - t, e)) - } - let llet = cx.llvm_type_of(e); - let n = t.simd_size(cx.tcx()) as u64; - Type::vector(&llet, n) - } - ty::TyAdt(..) => { - // Only create the named struct, but don't fill it in. We - // fill it in *after* placing it into the type cache. This - // avoids creating more than one copy of the enum when one - // of the enum's variants refers to the enum itself. - let name = llvm_type_name(cx, t); - adt::incomplete_type_of(cx, t, &name[..]) - } - - ty::TyInfer(..) | - ty::TyProjection(..) | - ty::TyParam(..) | - ty::TyAnon(..) | - ty::TyError => bug!("type_of with {:?}", t), + let name = match ty.sty { + ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAdt(..) => { + let mut name = String::with_capacity(32); + let printer = DefPathBasedNames::new(cx.tcx(), true, true); + printer.push_type_name(ty, &mut name); + Some(name) + } + _ => None }; - debug!("--> mapped t={:?} to llty={:?}", t, llty); + match *layout.fields { + layout::FieldPlacement::Union(_) => { + let size = layout.size(cx).bytes(); + let fill = Type::array(&Type::i8(cx), size); + match name { + None => { + Type::struct_(cx, &[fill], layout.is_packed()) + } + Some(ref name) => { + let mut llty = Type::named_struct(cx, name); + llty.set_struct_body(&[fill], layout.is_packed()); + llty + } + } + } + layout::FieldPlacement::Array { count, .. } => { + Type::array(&cx.llvm_type_of(layout.field(cx, 0).ty), count) + } + layout::FieldPlacement::Arbitrary { .. 
} => { + match name { + None => { + Type::struct_(cx, &struct_llfields(cx, layout), layout.is_packed()) + } + Some(ref name) => { + let llty = Type::named_struct(cx, name); + *defer = Some((llty, layout)); + llty + } + } + } + } +} + +pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, + layout: FullLayout<'tcx>) -> Vec<Type> { + debug!("struct_llfields: {:#?}", layout); + let align = layout.align(cx); + let size = layout.size(cx); + let field_count = layout.fields.count(); + + let mut offset = Size::from_bytes(0); + let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2); + for i in layout.fields.index_by_increasing_offset() { + let field = layout.field(cx, i); + let target_offset = layout.fields.offset(i as usize); + debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", + i, field, offset, target_offset); + assert!(target_offset >= offset); + let padding = target_offset - offset; + result.push(Type::array(&Type::i8(cx), padding.bytes())); + debug!(" padding before: {:?}", padding); - cx.lltypes().borrow_mut().insert(t, llty); + let llty = cx.llvm_type_of(field.ty); + result.push(llty); - // If this was an enum or struct, fill in the type now. - match t.sty { - ty::TyAdt(..) | ty::TyClosure(..) | ty::TyGenerator(..) if !t.is_simd() && !t.is_box() => { - adt::finish_type_of(cx, t, &mut llty); + if layout.is_packed() { + assert_eq!(padding.bytes(), 0); + } else { + let field_align = field.align(cx); + assert!(field_align.abi() <= align.abi(), + "non-packed type has field with larger align ({}): {:#?}", + field_align.abi(), layout); } - _ => () + + offset = target_offset + field.size(cx); + } + if !layout.is_unsized() && field_count > 0 { + if offset > size { + bug!("layout: {:#?} stride: {:?} offset: {:?}", + layout, size, offset); + } + let padding = size - offset; + debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", + padding, offset, size); + result.push(Type::array(&Type::i8(cx), padding.bytes())); + assert!(result.len() == 1 + field_count * 2); + } else { + debug!("struct_llfields: offset: {:?} stride: {:?}", + offset, size); } - llty + result } impl<'a, 'tcx> CrateContext<'a, 'tcx> { @@ -219,7 +231,38 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. pub fn llvm_type_of(&self, ty: Ty<'tcx>) -> Type { - compute_llvm_type(self, ty) + // Check the cache. + if let Some(&llty) = self.lltypes().borrow().get(&ty) { + return llty; + } + + debug!("type_of {:?}", ty); + + assert!(!ty.has_escaping_regions(), "{:?} has escaping regions", ty); + + // Make sure lifetimes are erased, to avoid generating distinct LLVM + // types for Rust types that only differ in the choice of lifetimes. 
+        let normal_ty = self.tcx().erase_regions(&ty);
+
+        if ty != normal_ty {
+            let llty = self.llvm_type_of(normal_ty);
+            debug!("--> normalized {:?} to {:?} llty={:?}", ty, normal_ty, llty);
+            self.lltypes().borrow_mut().insert(ty, llty);
+            return llty;
+        }
+
+        let mut defer = None;
+        let llty = uncached_llvm_type(self, ty, &mut defer);
+
+        debug!("--> mapped ty={:?} to llty={:?}", ty, llty);
+
+        self.lltypes().borrow_mut().insert(ty, llty);
+
+        if let Some((mut llty, layout)) = defer {
+            llty.set_struct_body(&struct_llfields(self, layout), layout.is_packed())
+        }
+
+        llty
     }
 
     pub fn immediate_llvm_type_of(&self, ty: Ty<'tcx>) -> Type {
@@ -240,26 +283,18 @@ impl<'tcx> LayoutLlvmExt for FullLayout<'tcx> {
         if let layout::Abi::Scalar(_) = self.abi {
            bug!("FullLayout::llvm_field_index({:?}): not applicable", self);
         }
-        let index = self.fields.memory_index(index);
-        match *self.layout {
-            Layout::Vector | Layout::Array => {
-                index as u64
+        match *self.fields {
+            layout::FieldPlacement::Union(_) => {
+                bug!("FullLayout::llvm_field_index({:?}): not applicable", self)
             }
-            Layout::FatPointer | Layout::Univariant => {
-                adt::memory_index_to_gep(index as u64)
+            layout::FieldPlacement::Array { .. } => {
+                index as u64
             }
-            _ => {
-                bug!("FullLayout::llvm_field_index({:?}): not applicable", self)
+            layout::FieldPlacement::Arbitrary { .. } => {
+                1 + (self.fields.memory_index(index) as u64) * 2
             }
         }
     }
 }
-
-fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String {
-    let mut name = String::with_capacity(32);
-    let printer = DefPathBasedNames::new(cx.tcx(), true, true);
-    printer.push_type_name(ty, &mut name);
-    name
-}
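For readers skimming the diff: the new `struct_llfields` in `type_of.rs` emits one explicit `[N x i8]` padding element before every field (walking fields in increasing-offset order) plus one trailing padding element up to the type's size, which is why `llvm_field_index` for `FieldPlacement::Arbitrary` returns `1 + memory_index * 2`. Below is a rough standalone sketch of that bookkeeping — not code from this commit; the `LlElem` type, the helper signatures, and the example offsets/sizes are invented for illustration.

```rust
/// One LLVM struct element: either `[len x i8]` padding or a real field.
#[derive(Debug)]
enum LlElem {
    Padding { bytes: u64 },
    Field { index: usize },
}

/// Sketch of the padding/field interleaving: `offsets[i]`/`sizes[i]`
/// describe Rust field `i`; fields may be stored in a different order
/// than declared, so we walk them by increasing offset.
fn struct_llfields(offsets: &[u64], sizes: &[u64], total_size: u64) -> Vec<LlElem> {
    let mut order: Vec<usize> = (0..offsets.len()).collect();
    order.sort_by_key(|&i| offsets[i]);

    let mut result = Vec::with_capacity(1 + offsets.len() * 2);
    let mut offset = 0;
    for &i in &order {
        assert!(offsets[i] >= offset);
        // Padding before the field, then the field itself: two elements per field.
        result.push(LlElem::Padding { bytes: offsets[i] - offset });
        result.push(LlElem::Field { index: i });
        offset = offsets[i] + sizes[i];
    }
    // Trailing padding up to the type's size (its stride), so
    // result.len() == 1 + field_count * 2, matching the assert in the diff.
    result.push(LlElem::Padding { bytes: total_size - offset });
    result
}

/// LLVM (GEP) element index of a field, given its rank in memory order:
/// skip one leading padding element plus one (padding, field) pair per
/// earlier-in-memory field.
fn llvm_field_index(memory_index: u64) -> u64 {
    1 + memory_index * 2
}

fn main() {
    // Invented example: two fields at offsets 0 and 4, sized 1 and 4 bytes,
    // in a type of total size 8 (3 bytes of interior padding).
    let elems = struct_llfields(&[0, 4], &[1, 4], 8);
    println!("{:?}", elems);
    assert_eq!(llvm_field_index(0), 1);
    assert_eq!(llvm_field_index(1), 3);
}
```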
