From 0a1fcc32a65c87646fe1613ea00c9447f04a646b Mon Sep 17 00:00:00 2001
From: Eduard-Mihai Burtescu
Date: Mon, 26 Jun 2017 18:33:50 +0300
Subject: rustc_trans: use *[T; 0] for slice data pointers instead of *T.

---
 src/test/codegen/function-arguments.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs
index 29e2840c881..0bacb816241 100644
--- a/src/test/codegen/function-arguments.rs
+++ b/src/test/codegen/function-arguments.rs
@@ -132,7 +132,7 @@ pub fn trait_borrow(_: &Drop) {
 pub fn trait_box(_: Box<Drop>) {
 }
 
-// CHECK: { i16*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
+// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
 #[no_mangle]
 pub fn return_slice(x: &[u16]) -> &[u16] {
   x
-- 
cgit 1.4.1-3-g733a5
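
Illustration, not part of the patch: the commit above only changes the FileCheck pattern, because a `&[u16]` fat pointer is a { data, len } pair whose data word is now typed as a pointer to a zero-length array (`[0 x i16]*` instead of `i16*`). A minimal Rust sketch of that pair follows; `RawSlice` is a hypothetical stand-in for the lowered representation, not a rustc type.

    // Hypothetical stand-in for the lowered fat pointer `{ [0 x i16]*, [[USIZE]] }`.
    #[repr(C)]
    struct RawSlice {
        data: *const [u16; 0], // slice data pointer, now *[T; 0] rather than *T
        len: usize,            // the `[[USIZE]]` metadata word
    }

    fn decompose(x: &[u16]) -> RawSlice {
        RawSlice { data: x.as_ptr() as *const [u16; 0], len: x.len() }
    }

    fn main() {
        let xs = [1u16, 2, 3];
        let raw = decompose(&xs);
        assert_eq!(raw.len, 3);
        assert_eq!(raw.data as *const u16, xs.as_ptr());
    }
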
From 8afa3a01e61906459a25d305176137e14ba3f835 Mon Sep 17 00:00:00 2001
From: Eduard-Mihai Burtescu
Date: Sun, 25 Jun 2017 12:42:55 +0300
Subject: rustc_trans: always insert alignment padding, even before the first
 field.

---
 src/librustc_trans/adt.rs              | 19 +++++++------------
 src/librustc_trans/common.rs           | 25 ++++++++++++++++++++++++-
 src/librustc_trans/context.rs          | 12 +++++++-----
 src/librustc_trans/mir/constant.rs     | 18 +++++------------
 src/librustc_trans/type_of.rs          | 15 ++++++++++++---
 src/test/codegen/adjustments.rs        |  7 ++++---
 src/test/codegen/consts.rs             |  4 ++--
 src/test/codegen/function-arguments.rs |  3 ++-
 src/test/codegen/refs.rs               |  5 +++--
 9 files changed, 66 insertions(+), 42 deletions(-)

diff --git a/src/librustc_trans/adt.rs b/src/librustc_trans/adt.rs
index c2988cd3da3..c1242f57139 100644
--- a/src/librustc_trans/adt.rs
+++ b/src/librustc_trans/adt.rs
@@ -202,9 +202,9 @@ fn union_fill(cx: &CrateContext, size: Size, align: Align) -> Type {
     Type::array(&elem_ty, size / abi_align)
 }
 
-/// Double an index to account for padding.
+/// Double an index and add 1 to account for padding.
 pub fn memory_index_to_gep(index: u64) -> u64 {
-    index * 2
+    1 + index * 2
 }
 
 pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
@@ -213,9 +213,8 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                  discr: Option<Ty<'tcx>>) -> Vec<Type> {
     let field_count = (discr.is_some() as usize) + layout.field_count();
     debug!("struct_llfields: variant: {:?}", variant);
-    let mut first_field = true;
     let mut offset = Size::from_bytes(0);
-    let mut result: Vec<Type> = Vec::with_capacity(field_count * 2);
+    let mut result: Vec<Type> = Vec::with_capacity(1 + field_count * 2);
     let field_iter = variant.field_index_by_increasing_offset().map(|i| {
         let ty = if i == 0 && discr.is_some() {
             cx.layout_of(discr.unwrap())
@@ -229,13 +228,9 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                index, field, offset, target_offset);
         assert!(target_offset >= offset);
         let padding = target_offset - offset;
-        if first_field {
-            assert_eq!(padding.bytes(), 0);
-            first_field = false;
-        } else {
-            result.push(Type::array(&Type::i8(cx), padding.bytes()));
-            debug!("    padding before: {:?}", padding);
-        }
+        result.push(Type::array(&Type::i8(cx), padding.bytes()));
+        debug!("    padding before: {:?}", padding);
+
         let llty = cx.llvm_type_of(field.ty);
         result.push(llty);
 
@@ -259,7 +254,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         debug!("struct_llfields: pad_bytes: {:?} offset: {:?} min_size: {:?} stride: {:?}",
                padding, offset, variant.min_size, variant.stride());
         result.push(Type::array(&Type::i8(cx), padding.bytes()));
-        assert!(result.len() == (field_count * 2));
+        assert!(result.len() == 1 + field_count * 2);
     } else {
         debug!("struct_llfields: offset: {:?} min_size: {:?} stride: {:?}",
                offset, variant.min_size, variant.stride());
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 749c5393e43..2e010ccee48 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -18,6 +18,7 @@ use llvm::{True, False, Bool, OperandBundleDef};
 use rustc::hir::def_id::DefId;
 use rustc::hir::map::DefPathData;
 use rustc::middle::lang_items::LangItem;
+use abi;
 use base;
 use builder::Builder;
 use consts;
@@ -267,7 +268,29 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
     let len = s.len();
     let cs = consts::ptrcast(C_cstr(cx, s, false),
                              cx.llvm_type_of(cx.tcx().mk_str()).ptr_to());
-    C_named_struct(cx.str_slice_type(), &[cs, C_usize(cx, len as u64)])
+    let empty = C_array(Type::i8(cx), &[]);
+    assert_eq!(abi::FAT_PTR_ADDR, 0);
+    assert_eq!(abi::FAT_PTR_EXTRA, 1);
+    C_named_struct(cx.str_slice_type(), &[
+        empty,
+        cs,
+        empty,
+        C_usize(cx, len as u64),
+        empty
+    ])
+}
+
+pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
+    let empty = C_array(Type::i8(cx), &[]);
+    assert_eq!(abi::FAT_PTR_ADDR, 0);
+    assert_eq!(abi::FAT_PTR_EXTRA, 1);
+    C_struct(cx, &[
+        empty,
+        ptr,
+        empty,
+        meta,
+        empty
+    ], false)
 }
 
 pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef {
diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs
index 4e003edac3c..ac5f4372286 100644
--- a/src/librustc_trans/context.rs
+++ b/src/librustc_trans/context.rs
@@ -395,11 +395,13 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
                 let dummy_ccx = LocalCrateContext::dummy_ccx(shared, local_ccxs.as_mut_slice());
                 let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice");
-
-                let llptrty = dummy_ccx.llvm_type_of(shared.tcx.mk_str()).ptr_to();
-                str_slice_ty.set_struct_body(&[llptrty,
-                                               Type::isize(&dummy_ccx)],
-                                              false);
+                str_slice_ty.set_struct_body(&[
+                    Type::array(&Type::i8(&dummy_ccx), 0),
+                    dummy_ccx.llvm_type_of(shared.tcx.mk_str()).ptr_to(),
+                    Type::array(&Type::i8(&dummy_ccx), 0),
+                    Type::isize(&dummy_ccx),
+                    Type::array(&Type::i8(&dummy_ccx), 0)
+                ], false);
                 (Type::isize(&dummy_ccx), str_slice_ty)
             };
             (isize_ty, str_slice_ty, local_ccxs.pop().unwrap())
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index e253701903f..1b3559a50e3 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -29,7 +29,7 @@ use callee;
 use builder::Builder;
 use common::{self, CrateContext, const_get_elt, val_ty};
 use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64};
-use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector};
+use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
 use common::const_to_opt_u128;
 use consts;
 use type_of::{self, LayoutLlvmExt};
@@ -675,9 +675,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                         .insert(base, operand.llval);
                     assert!(prev_const.is_none() || prev_const == Some(operand.llval));
                 }
-                assert_eq!(abi::FAT_PTR_ADDR, 0);
-                assert_eq!(abi::FAT_PTR_EXTRA, 1);
-                C_struct(self.ccx, &[base, info], false)
+                C_fat_ptr(self.ccx, base, info)
             }
             mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => {
                 debug_assert!(common::type_is_immediate(self.ccx, cast_ty));
@@ -734,7 +732,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                 if common::type_is_fat_ptr(self.ccx, cast_ty) {
                     let llcast_ty = type_of::fat_ptr_base_ty(self.ccx, cast_ty);
                     let data_cast = consts::ptrcast(data_ptr, llcast_ty);
-                    C_struct(self.ccx, &[data_cast, meta], false)
+                    C_fat_ptr(self.ccx, data_cast, meta)
                 } else { // cast to thin-ptr
                     // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                     // pointer-cast of that pointer to desired pointer type.
@@ -777,7 +775,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                 let ptr = if self.ccx.shared().type_is_sized(ty) {
                     base
                 } else {
-                    C_struct(self.ccx, &[base, tr_lvalue.llextra], false)
+                    C_fat_ptr(self.ccx, base, tr_lvalue.llextra)
                 };
                 Const::new(ptr, ref_ty)
             }
@@ -1176,14 +1174,8 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     let parts = st.field_index_by_increasing_offset().map(|i| {
         (vals[i], st.offsets[i])
     });
-    let mut first_field = true;
     for (val, target_offset) in parts {
-        if first_field {
-            first_field = false;
-            assert_eq!(target_offset.bytes(), 0);
-        } else {
-            cfields.push(padding(ccx, target_offset - offset));
-        }
+        cfields.push(padding(ccx, target_offset - offset));
         cfields.push(val.llval);
         offset = target_offset + ccx.size_of(val.ty);
     }
diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs
index d1305957634..f86bc17d20a 100644
--- a/src/librustc_trans/type_of.rs
+++ b/src/librustc_trans/type_of.rs
@@ -76,7 +76,13 @@ fn compute_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type
         } else {
             let ptr_ty = cx.llvm_type_of(ty).ptr_to();
             let info_ty = unsized_info_ty(cx, ty);
-            Type::struct_(cx, &[ptr_ty, info_ty], false)
+            Type::struct_(cx, &[
+                Type::array(&Type::i8(cx), 0),
+                ptr_ty,
+                Type::array(&Type::i8(cx), 0),
+                info_ty,
+                Type::array(&Type::i8(cx), 0)
+            ], false)
         }
     } else {
         cx.llvm_type_of(ty).ptr_to()
@@ -240,11 +246,14 @@ impl<'tcx> LayoutLlvmExt for TyLayout<'tcx> {
             }
 
             Layout::Vector { .. } |
-            Layout::Array { .. } |
-            Layout::FatPointer { .. } => {
+            Layout::Array { .. } => {
                 index as u64
             }
 
+            Layout::FatPointer { .. } => {
+                adt::memory_index_to_gep(index as u64)
+            }
+
             Layout::Univariant { ref variant, .. } => {
                 adt::memory_index_to_gep(variant.memory_index[index] as u64)
             }
diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs
index 56f9b98b482..8a680f1c9d6 100644
--- a/src/test/codegen/adjustments.rs
+++ b/src/test/codegen/adjustments.rs
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 // compile-flags: -C no-prepopulate-passes
+// ignore-tidy-linelength
 
 #![crate_type = "lib"]
 
@@ -24,9 +25,9 @@ pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
 // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
 // check that we copy directly to the return value slot
 // CHECK: %x.ptr = bitcast i8* %0 to [0 x i8]*
-// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.ptr, 0
-// CHECK: %2 = insertvalue { [0 x i8]*, [[USIZE]] } %1, [[USIZE]] %x.meta, 1
-// CHECK: ret { [0 x i8]*, [[USIZE]] } %2
+// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.ptr, 1
+// CHECK: %2 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1, [[USIZE]] %x.meta, 3
+// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %2
   {
     x
   }
 }
diff --git a/src/test/codegen/consts.rs b/src/test/codegen/consts.rs
index a75b8f3992d..705488b7757 100644
--- a/src/test/codegen/consts.rs
+++ b/src/test/codegen/consts.rs
@@ -54,7 +54,7 @@ pub fn inline_enum_const() -> E {
 #[no_mangle]
 pub fn low_align_const() -> E {
 // Check that low_align_const and high_align_const use the same constant
-// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]]
+// CHECK: load {{.*}} bitcast ({ [0 x i8], i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]]
   *&E::A(0)
 }
 
@@ -62,6 +62,6 @@ pub fn high_align_const() -> E {
 // Check that low_align_const and high_align_const use the same constant
-// CHECK: load {{.*}} bitcast ({ i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]]
+// CHECK: load {{.*}} bitcast ({ [0 x i8], i16, [0 x i8], i16, [4 x i8] }** [[LOW_HIGH_REF]]
   *&E::A(0)
 }
diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs
index 0bacb816241..5d073670d86 100644
--- a/src/test/codegen/function-arguments.rs
+++ b/src/test/codegen/function-arguments.rs
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 // compile-flags: -C no-prepopulate-passes
+// ignore-tidy-linelength
 
 #![crate_type = "lib"]
 #![feature(custom_attribute)]
 
@@ -132,7 +133,7 @@ pub fn trait_borrow(_: &Drop) {
 pub fn trait_box(_: Box<Drop>) {
 }
 
-// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
+// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta)
 #[no_mangle]
 pub fn return_slice(x: &[u16]) -> &[u16] {
   x
diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs
index d191bedee5d..ad799247f59 100644
--- a/src/test/codegen/refs.rs
+++ b/src/test/codegen/refs.rs
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 // compile-flags: -C no-prepopulate-passes
+// ignore-tidy-linelength
 
 #![crate_type = "lib"]
 
@@ -23,9 +24,9 @@ pub fn helper(_: usize) {
 pub fn ref_dst(s: &[u8]) {
 // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
 // directly to the alloca for "x"
-// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 0
+// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 1
 // CHECK: store [0 x i8]* %s.ptr, [0 x i8]** [[X0]]
-// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1
+// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3
 // CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]]
 
     let x = &*s;
-- 
cgit 1.4.1-3-g733a5
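
Illustration, not part of the patch: after the commit above, struct_llfields emits a padding filler before every field (including the first), so LLVM aggregates have the shape { pad, field0, pad, field1, ..., pad } and a slice fat pointer becomes { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }. Field i of the layout therefore sits at LLVM field index 1 + 2*i, which is what the new memory_index_to_gep computes and what the updated `i32 0, i32 1` / `i32 0, i32 3` GEPs in refs.rs assert. A standalone sketch of that mapping:

    /// Mirrors `adt::memory_index_to_gep` from the adt.rs hunk above: with a
    /// padding entry emitted before every field, field `i` is LLVM field 1 + 2*i.
    fn memory_index_to_gep(index: u64) -> u64 {
        1 + index * 2
    }

    fn main() {
        // Fat pointer fields: FAT_PTR_ADDR == 0 (data), FAT_PTR_EXTRA == 1 (length).
        assert_eq!(memory_index_to_gep(0), 1); // matches `i32 0, i32 1` in refs.rs
        assert_eq!(memory_index_to_gep(1), 3); // matches `i32 0, i32 3` in refs.rs
    }
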
From b723af284a22e6c0f2d85c104067138c33f8859d Mon Sep 17 00:00:00 2001
From: Eduard-Mihai Burtescu
Date: Thu, 21 Sep 2017 20:40:50 +0300
Subject: rustc_trans: go through layouts uniformly for fat pointers and
 variants.

---
 src/librustc/lint/context.rs             |   6 +-
 src/librustc/ty/layout.rs                | 233 ++++++++++++++++---------------
 src/librustc_lint/types.rs               |   2 +-
 src/librustc_llvm/ffi.rs                 |   5 -
 src/librustc_trans/abi.rs                |  12 +-
 src/librustc_trans/base.rs               |  17 ++-
 src/librustc_trans/cabi_s390x.rs         |   4 +-
 src/librustc_trans/cabi_x86.rs           |   4 +-
 src/librustc_trans/cabi_x86_64.rs        |   6 +-
 src/librustc_trans/common.rs             |  32 +----
 src/librustc_trans/context.rs            |  44 +++---
 src/librustc_trans/debuginfo/metadata.rs |  14 +-
 src/librustc_trans/meth.rs               |   6 +-
 src/librustc_trans/mir/analyze.rs        |  14 +-
 src/librustc_trans/mir/block.rs          |  33 +++--
 src/librustc_trans/mir/constant.rs       |  21 +--
 src/librustc_trans/mir/lvalue.rs         |  61 +++-----
 src/librustc_trans/mir/mod.rs            |  35 ++---
 src/librustc_trans/mir/operand.rs        |   6 +-
 src/librustc_trans/mir/rvalue.rs         |  16 ++-
 src/librustc_trans/type_.rs              |   4 -
 src/librustc_trans/type_of.rs            | 208 +++++++++++++--------------
 src/test/codegen/adjustments.rs          |   7 +-
 src/test/codegen/function-arguments.rs   |  14 +-
 src/test/codegen/refs.rs                 |   6 +-
 25 files changed, 363 insertions(+), 447 deletions(-)

diff --git a/src/librustc/lint/context.rs b/src/librustc/lint/context.rs
index a080f968da4..4496e07b138 100644
--- a/src/librustc/lint/context.rs
+++ b/src/librustc/lint/context.rs
@@ -35,7 +35,7 @@ use rustc_serialize::{Decoder, Decodable, Encoder, Encodable};
 use session::{config, early_error, Session};
 use traits::Reveal;
 use ty::{self, TyCtxt, Ty};
-use ty::layout::{FullLayout, LayoutError, LayoutOf};
+use ty::layout::{LayoutError, LayoutOf, TyLayout};
 use util::nodemap::FxHashMap;
 
 use std::default::Default as StdDefault;
@@ -628,9 +628,9 @@ impl<'a, 'tcx> LateContext<'a, 'tcx> {
 }
 
 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a LateContext<'a, 'tcx> {
-    type FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>;
+    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
 
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
         (self.tcx, self.param_env.reveal_all()).layout_of(ty)
     }
 }
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 3e64a6a4c5d..7bf7d81037d 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -23,7 +23,7 @@ use std::fmt;
 use std::i64;
 use std::iter;
 use std::mem;
-use std::ops::{Add, Sub, Mul, AddAssign, RangeInclusive};
+use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive};
 
 use ich::StableHashingContext;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher,
@@ -907,6 +907,7 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> {
 
 #[derive(PartialEq, Eq, Hash, Debug)]
 pub struct CachedLayout {
+    pub variant_index: Option<usize>,
     pub layout: Layout,
     pub fields: FieldPlacement,
     pub abi: Abi,
@@ -948,6 +949,7 @@ impl<'a, 'tcx> Layout {
         let dl = cx.data_layout();
         let scalar = |value| {
             tcx.intern_layout(CachedLayout {
+                variant_index: None,
                 layout: Layout::Scalar,
                 fields: FieldPlacement::Union(0),
                 abi: Abi::Scalar(value)
@@ -962,7 +964,7 @@ impl<'a, 'tcx> Layout {
             /// A univariant, but part of an enum.
             EnumVariant(Integer),
         }
-        let univariant_uninterned = |fields: &[FullLayout], repr: &ReprOptions, kind| {
+        let univariant_uninterned = |fields: &[TyLayout], repr: &ReprOptions, kind| {
             let packed = repr.packed();
             if packed && repr.align > 0 {
                 bug!("struct cannot be packed and aligned");
@@ -1085,6 +1087,7 @@ impl<'a, 'tcx> Layout {
             }
 
             Ok(CachedLayout {
+                variant_index: None,
                 layout: Layout::Univariant,
                 fields: FieldPlacement::Arbitrary {
                     offsets,
@@ -1099,7 +1102,7 @@ impl<'a, 'tcx> Layout {
                 }
             })
         };
-        let univariant = |fields: &[FullLayout], repr: &ReprOptions, kind| {
+        let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
             Ok(tcx.intern_layout(univariant_uninterned(fields, repr, kind)?))
         };
         assert!(!ty.has_infer_types());
@@ -1129,6 +1132,7 @@ impl<'a, 'tcx> Layout {
                     memory_index: vec![0, 1]
                 };
                 Ok(tcx.intern_layout(CachedLayout {
+                    variant_index: None,
                     layout: Layout::Univariant,
                     fields,
                     abi: Abi::Aggregate {
@@ -1185,6 +1189,7 @@ impl<'a, 'tcx> Layout {
                     .ok_or(LayoutError::SizeOverflow(ty))?;
 
                 tcx.intern_layout(CachedLayout {
+                    variant_index: None,
                     layout: Layout::Array,
                     fields: FieldPlacement::Array {
                         stride: element_size,
@@ -1202,6 +1207,7 @@ impl<'a, 'tcx> Layout {
             ty::TySlice(element) => {
                 let element = cx.layout_of(element)?;
                 tcx.intern_layout(CachedLayout {
+                    variant_index: None,
                     layout: Layout::Array,
                     fields: FieldPlacement::Array {
                         stride: element.size(dl),
@@ -1218,6 +1224,7 @@ impl<'a, 'tcx> Layout {
             }
             ty::TyStr => {
                 tcx.intern_layout(CachedLayout {
+                    variant_index: None,
                     layout: Layout::Array,
                     fields: FieldPlacement::Array {
                         stride: Size::from_bytes(1),
@@ -1286,6 +1293,7 @@ impl<'a, 'tcx> Layout {
                     }
                 };
                 tcx.intern_layout(CachedLayout {
+                    variant_index: None,
                     layout: Layout::Vector,
                     fields: FieldPlacement::Array {
                         stride: element.size(tcx),
@@ -1343,6 +1351,7 @@ impl<'a, 'tcx> Layout {
                     }
 
                     return Ok(tcx.intern_layout(CachedLayout {
+                        variant_index: None,
                         layout: Layout::UntaggedUnion,
                         fields: FieldPlacement::Union(variants[0].len()),
                         abi: Abi::Aggregate {
@@ -1372,7 +1381,11 @@ impl<'a, 'tcx> Layout {
                         else { StructKind::AlwaysSized }
                     };
 
-                    return univariant(&variants[0], &def.repr, kind);
+                    let mut cached = univariant_uninterned(&variants[0], &def.repr, kind)?;
+                    if def.is_enum() {
+                        cached.variant_index = Some(0);
+                    }
+                    return Ok(tcx.intern_layout(cached));
                 }
 
                 let no_explicit_discriminants = def.variants.iter().enumerate()
@@ -1389,12 +1402,14 @@ impl<'a, 'tcx> Layout {
 
                     for (field_index, field) in variants[i].iter().enumerate() {
                         if let Some((offset, discr)) = field.non_zero_field(cx)? {
-                            let st = vec![
+                            let mut st = vec![
                                 univariant_uninterned(&variants[0],
                                     &def.repr, StructKind::AlwaysSized)?,
                                 univariant_uninterned(&variants[1],
                                     &def.repr, StructKind::AlwaysSized)?
                             ];
+                            st[0].variant_index = Some(0);
+                            st[1].variant_index = Some(1);
                             let offset = st[i].fields.offset(field_index) + offset;
                             let mut abi = st[i].abi;
                             if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) {
@@ -1418,6 +1433,7 @@ impl<'a, 'tcx> Layout {
                                 _ => {}
                             }
                             return Ok(tcx.intern_layout(CachedLayout {
+                                variant_index: None,
                                 layout: Layout::NullablePointer {
                                     nndiscr: i as u64,
@@ -1454,13 +1470,13 @@ impl<'a, 'tcx> Layout {
                 assert_eq!(Integer::for_abi_align(dl, start_align), None);
 
                 // Create the set of structs that represent each variant.
-                let mut variants = variants.into_iter().map(|field_layouts| {
-                    let st = univariant_uninterned(&field_layouts,
+                let mut variants = variants.into_iter().enumerate().map(|(i, field_layouts)| {
+                    let mut st = univariant_uninterned(&field_layouts,
                         &def.repr, StructKind::EnumVariant(min_ity))?;
+                    st.variant_index = Some(i);
                     // Find the first field we can't move later
                     // to make room for a larger discriminant.
-                    for i in st.fields.index_by_increasing_offset() {
-                        let field = field_layouts[i];
+                    for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
                         let field_align = field.align(dl);
                         if !field.is_zst() || field_align.abi() != 1 {
                             start_align = start_align.min(field_align);
@@ -1539,6 +1555,7 @@ impl<'a, 'tcx> Layout {
                 let discr = Int(ity, signed);
 
                 tcx.intern_layout(CachedLayout {
+                    variant_index: None,
                     layout: Layout::General {
                         discr,
@@ -1587,7 +1604,7 @@ impl<'a, 'tcx> Layout {
     fn record_layout_for_printing(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                   ty: Ty<'tcx>,
                                   param_env: ty::ParamEnv<'tcx>,
-                                  layout: FullLayout<'tcx>) {
+                                  layout: TyLayout<'tcx>) {
        // If we are running with `-Zprint-type-sizes`, record layouts for
        // dumping later. Ignore layouts that are done with non-empty
        // environments or non-monomorphic layouts, as the user only wants
@@ -1607,7 +1624,7 @@ impl<'a, 'tcx> Layout {
     fn record_layout_for_printing_outlined(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                            ty: Ty<'tcx>,
                                            param_env: ty::ParamEnv<'tcx>,
-                                           layout: FullLayout<'tcx>) {
+                                           layout: TyLayout<'tcx>) {
         let cx = (tcx, param_env);
 
         // (delay format until we actually need it)
         let record = |kind, opt_discr_size, variants| {
@@ -1644,7 +1661,7 @@ impl<'a, 'tcx> Layout {
 
         let build_variant_info = |n: Option<ast::Name>,
                                   flds: &[ast::Name],
-                                  layout: FullLayout<'tcx>| {
+                                  layout: TyLayout<'tcx>| {
             let mut min_size = Size::from_bytes(0);
             let field_info: Vec<_> = flds.iter().enumerate().map(|(i, &name)| {
                 match layout.field(cx, i) {
@@ -1685,7 +1702,7 @@ impl<'a, 'tcx> Layout {
             }
         };
 
-        match *layout.layout {
+        match layout.layout {
             Layout::Univariant => {
                 let variant_names = || {
                     adt_def.variants.iter().map(|v| format!("{}", v.name)).collect::<Vec<_>>()
@@ -1723,7 +1740,7 @@ impl<'a, 'tcx> Layout {
                                         layout.for_variant(i))
                     })
                     .collect();
-                record(adt_kind.into(), match *layout.layout {
+                record(adt_kind.into(), match layout.layout {
                     Layout::General { discr, .. } => Some(discr.size(tcx)),
                     _ => None
                 }, variant_infos);
@@ -1901,12 +1918,16 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> {
 /// layouts for which Rust types do not exist, such as enum variants
 /// or synthetic fields of enums (i.e. discriminants) and fat pointers.
 #[derive(Copy, Clone, Debug)]
-pub struct FullLayout<'tcx> {
+pub struct TyLayout<'tcx> {
     pub ty: Ty<'tcx>,
-    pub variant_index: Option<usize>,
-    pub layout: &'tcx Layout,
-    pub fields: &'tcx FieldPlacement,
-    pub abi: Abi,
+    cached: &'tcx CachedLayout
+}
+
+impl<'tcx> Deref for TyLayout<'tcx> {
+    type Target = &'tcx CachedLayout;
+    fn deref(&self) -> &&'tcx CachedLayout {
+        &self.cached
+    }
 }
 
 pub trait HasTyCtxt<'tcx>: HasDataLayout {
@@ -1937,29 +1958,42 @@ impl<'a, 'gcx, 'tcx, T: Copy> HasTyCtxt<'gcx> for (TyCtxt<'a, 'gcx, 'tcx>, T) {
     }
 }
 
+pub trait MaybeResult<T> {
+    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self;
+}
+
+impl<T> MaybeResult<T> for T {
+    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
+        f(self)
+    }
+}
+
+impl<T, E> MaybeResult<T> for Result<T, E> {
+    fn map_same<F: FnOnce(T) -> T>(self, f: F) -> Self {
+        self.map(f)
+    }
+}
+
 pub trait LayoutOf<T> {
-    type FullLayout;
+    type TyLayout;
 
-    fn layout_of(self, ty: T) -> Self::FullLayout;
+    fn layout_of(self, ty: T) -> Self::TyLayout;
 }
 
 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx>) {
-    type FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>;
+    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
 
     /// Computes the layout of a type. Note that this implicitly
     /// executes in "reveal all" mode.
     #[inline]
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
         let (tcx, param_env) = self;
 
         let ty = tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
         let cached = tcx.layout_raw(param_env.reveal_all().and(ty))?;
-        let layout = FullLayout {
+        let layout = TyLayout {
             ty,
-            variant_index: None,
-            layout: &cached.layout,
-            fields: &cached.fields,
-            abi: cached.abi
+            cached
         };
 
         // NB: This recording is normally disabled; when enabled, it
@@ -1976,22 +2010,19 @@ impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (TyCtxt<'a, 'tcx, 'tcx>, ty::ParamEnv<'tcx
 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>,
                                        ty::ParamEnv<'tcx>) {
-    type FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>;
+    type TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>;
 
     /// Computes the layout of a type. Note that this implicitly
     /// executes in "reveal all" mode.
     #[inline]
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
         let (tcx_at, param_env) = self;
 
         let ty = tcx_at.tcx.normalize_associated_type_in_env(&ty, param_env.reveal_all());
         let cached = tcx_at.layout_raw(param_env.reveal_all().and(ty))?;
-        let layout = FullLayout {
+        let layout = TyLayout {
             ty,
-            variant_index: None,
-            layout: &cached.layout,
-            fields: &cached.fields,
-            abi: cached.abi
+            cached
         };
 
         // NB: This recording is normally disabled; when enabled, it
@@ -2006,79 +2037,57 @@ impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for (ty::maps::TyCtxtAt<'a, 'tcx, 'tcx>,
     }
 }
 
-impl<'a, 'tcx> FullLayout<'tcx> {
+impl<'a, 'tcx> TyLayout<'tcx> {
     pub fn for_variant(&self, variant_index: usize) -> Self {
-        let variants = match self.ty.sty {
-            ty::TyAdt(def, _) if def.is_enum() => &def.variants[..],
-            _ => &[]
-        };
-        let count = if variants.is_empty() {
-            0
-        } else {
-            variants[variant_index].fields.len()
-        };
-
-        let (layout, fields, abi) = match *self.layout {
-            Layout::Univariant => (self.layout, self.fields, self.abi),
-
+        let cached = match self.layout {
             Layout::NullablePointer { ref variants, .. } |
             Layout::General { ref variants, ..
            } => {
-                let variant = &variants[variant_index];
-                (&variant.layout, &variant.fields, variant.abi)
+                &variants[variant_index]
             }
 
-            _ => bug!()
+            _ => self.cached
         };
 
-        assert_eq!(fields.count(), count);
-
-        FullLayout {
-            variant_index: Some(variant_index),
-            layout,
-            fields,
-            abi,
-            ..*self
+        assert_eq!(cached.variant_index, Some(variant_index));
+
+        TyLayout {
+            ty: self.ty,
+            cached
         }
     }
 
-    fn field_type_unnormalized(&self, tcx: TyCtxt<'a, 'tcx, 'tcx>, i: usize) -> Ty<'tcx> {
-        let ptr_field_type = |pointee: Ty<'tcx>| {
+    pub fn field<C>(&self, cx: C, i: usize) -> C::TyLayout
+        where C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>,
+              C::TyLayout: MaybeResult<TyLayout<'tcx>>
+    {
+        let tcx = cx.tcx();
+        let ptr_field_layout = |pointee: Ty<'tcx>| {
             assert!(i < 2);
-            let mk_ptr = |ty: Ty<'tcx>| {
-                match self.ty.sty {
-                    ty::TyRef(r, ty::TypeAndMut { mutbl, .. }) => {
-                        tcx.mk_ref(r, ty::TypeAndMut { ty, mutbl })
-                    }
-                    ty::TyRawPtr(ty::TypeAndMut { mutbl, .. }) => {
-                        tcx.mk_ptr(ty::TypeAndMut { ty, mutbl })
-                    }
-                    ty::TyAdt(def, _) if def.is_box() => {
-                        tcx.mk_box(ty)
-                    }
-                    _ => bug!()
-                }
-            };
-            let slice = |element: Ty<'tcx>| {
-                if i == 0 {
-                    mk_ptr(element)
-                } else {
-                    tcx.types.usize
-                }
-            };
-            match tcx.struct_tail(pointee).sty {
-                ty::TySlice(element) => slice(element),
-                ty::TyStr => slice(tcx.types.u8),
+
+            // Reuse the fat *T type as its own thin pointer data field.
+            // This provides information about e.g. DST struct pointees
+            // (which may have no non-DST form), and will work as long
+            // as the `Abi` or `FieldPlacement` is checked by users.
+            if i == 0 {
+                return cx.layout_of(Pointer.to_ty(tcx)).map_same(|mut ptr_layout| {
+                    ptr_layout.ty = self.ty;
+                    ptr_layout
+                });
+            }
+
+            let meta_ty = match tcx.struct_tail(pointee).sty {
+                ty::TySlice(_) |
+                ty::TyStr => tcx.types.usize,
                 ty::TyDynamic(..) => {
-                    if i == 0 {
-                        mk_ptr(tcx.mk_nil())
-                    } else {
-                        Pointer.to_ty(tcx)
-                    }
+                    // FIXME(eddyb) use an usize/fn() array with
+                    // the correct number of vtables slots.
+                    tcx.mk_imm_ref(tcx.types.re_static, tcx.mk_nil())
                 }
-                _ => bug!("FullLayout::field_type({:?}): not applicable", self)
-            }
+                _ => bug!("TyLayout::field_type({:?}): not applicable", self)
+            };
+            cx.layout_of(meta_ty)
         };
 
-        match self.ty.sty {
+        cx.layout_of(match self.ty.sty {
             ty::TyBool |
             ty::TyChar |
             ty::TyInt(_) |
            ty::TyUint(_) |
            ty::TyFloat(_) |
            ty::TyFnPtr(_) |
            ty::TyNever |
            ty::TyFnDef(..) |
            ty::TyDynamic(..) |
            ty::TyForeign(..) => {
-                bug!("FullLayout::field_type({:?}): not applicable", self)
+                bug!("TyLayout::field_type({:?}): not applicable", self)
            }
 
            // Potentially-fat pointers.
            ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
-                ptr_field_type(pointee)
+                return ptr_field_layout(pointee);
            }
            ty::TyAdt(def, _) if def.is_box() => {
-                ptr_field_type(self.ty.boxed_ty())
+                return ptr_field_layout(self.ty.boxed_ty());
            }
 
            // Arrays and slices.
@@ -2126,16 +2135,16 @@ impl<'a, 'tcx> TyLayout<'tcx> {
            ty::TyAdt(def, substs) => {
                let v = if def.is_enum() {
                    match self.variant_index {
-                        None => match *self.layout {
+                        None => match self.layout {
                            // Discriminant field for enums (where applicable).
                            Layout::General { discr, .. } |
                            Layout::NullablePointer { discr, .. } => {
-                                return [discr.to_ty(tcx)][i];
+                                return cx.layout_of([discr.to_ty(tcx)][i]);
+                            }
+                            _ => {
+                                bug!("TyLayout::field_type: enum `{}` has no discriminant",
+                                     self.ty)
                            }
-                            _ if def.variants.len() > 1 => return [][i],
-
-                            // Enums with one variant behave like structs.
-                            _ => 0
                        },
                        Some(v) => v
                    }
 
            ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
            ty::TyInfer(_) | ty::TyError => {
-                bug!("FullLayout::field_type: unexpected type `{}`", self.ty)
+                bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
            }
-        }
-    }
-
-    pub fn field<C: LayoutOf<Ty<'tcx>> + HasTyCtxt<'tcx>>(&self,
-                                                          cx: C,
-                                                          i: usize)
-                                                          -> C::FullLayout {
-        cx.layout_of(self.field_type_unnormalized(cx.tcx(), i))
+        })
    }
 
    /// Returns true if the layout corresponds to an unsized type.
@@ -2198,11 +2200,11 @@ impl<'a, 'tcx> TyLayout<'tcx> {
    // FIXME(eddyb) track value ranges and traverse already optimized enums.
    fn non_zero_field<C>(&self, cx: C)
                         -> Result<Option<(Size, Primitive)>, LayoutError<'tcx>>
-        where C: LayoutOf<Ty<'tcx>, FullLayout = Result<FullLayout<'tcx>, LayoutError<'tcx>>> +
+        where C: LayoutOf<Ty<'tcx>, TyLayout = Result<TyLayout<'tcx>, LayoutError<'tcx>>> +
                 HasTyCtxt<'tcx>
    {
        let tcx = cx.tcx();
-        match (self.layout, self.abi, &self.ty.sty) {
+        match (&self.layout, self.abi, &self.ty.sty) {
            // FIXME(eddyb) check this via value ranges on scalars.
            (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyRef(..)) |
            (&Layout::Scalar, Abi::Scalar(Pointer), &ty::TyFnPtr(..)) => {
 
            // Perhaps one of the fields is non-zero, let's recurse and find out.
            _ => {
-                if let FieldPlacement::Array { count, .. } = *self.fields {
+                if let FieldPlacement::Array { count, .. } = self.fields {
                    if count > 0 {
                        return self.field(cx, 0)?.non_zero_field(cx);
                    }
@@ -2341,6 +2343,7 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
 }
 
 impl_stable_hash_for!(struct ::ty::layout::CachedLayout {
+    variant_index,
     layout,
     fields,
     abi
diff --git a/src/librustc_lint/types.rs b/src/librustc_lint/types.rs
index dd5e97544c8..e0c7bc66876 100644
--- a/src/librustc_lint/types.rs
+++ b/src/librustc_lint/types.rs
@@ -753,7 +753,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
                     bug!("failed to get layout for `{}`: {}", t, e)
                 });
 
-            if let Layout::General { ref variants, discr, .. } = *layout.layout {
+            if let Layout::General { ref variants, discr, .. } = layout.layout {
                 let discr_size = discr.size(cx.tcx).bytes();
 
                 debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs
index 0f96a22f897..fdc27d4e041 100644
--- a/src/librustc_llvm/ffi.rs
+++ b/src/librustc_llvm/ffi.rs
@@ -1316,11 +1316,6 @@ extern "C" {
                               ElementCount: c_uint,
                               Packed: Bool);
 
-    pub fn LLVMConstNamedStruct(S: TypeRef,
-                                ConstantVals: *const ValueRef,
-                                Count: c_uint)
-                                -> ValueRef;
-
    /// Enables LLVM debug output.
    pub fn LLVMRustSetDebug(Enabled: c_int);
diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index bd9a460846b..8fa55b6ef74 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -36,7 +36,7 @@ use type_of::LayoutLlvmExt;
 
 use rustc::hir;
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, Size, FullLayout};
+use rustc::ty::layout::{self, Align, Size, TyLayout};
 use rustc::ty::layout::{HasDataLayout, LayoutOf};
 
 use rustc_back::PanicStrategy;
@@ -275,7 +275,7 @@ pub trait LayoutExt<'tcx> {
     fn homogeneous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
 }
 
-impl<'tcx> LayoutExt<'tcx> for FullLayout<'tcx> {
+impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
     fn is_aggregate(&self) -> bool {
         match self.abi {
             layout::Abi::Scalar(_) |
@@ -311,7 +311,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
         let mut total = Size::from_bytes(0);
         let mut result = None;
 
-        let is_union = match *self.fields {
+        let is_union = match self.fields {
             layout::FieldPlacement::Array { count, .. } => {
                 if count > 0 {
                     return self.field(ccx, 0).homogeneous_aggregate(ccx);
@@ -424,7 +424,7 @@ impl CastTarget {
 #[derive(Debug)]
 pub struct ArgType<'tcx> {
     kind: ArgKind,
-    pub layout: FullLayout<'tcx>,
+    pub layout: TyLayout<'tcx>,
     /// Cast target, either a single uniform or a pair of registers.
     pub cast: Option<CastTarget>,
     /// Dummy argument, which is emitted before the real argument.
@@ -435,7 +435,7 @@ pub struct ArgType<'tcx> {
 }
 
 impl<'a, 'tcx> ArgType<'tcx> {
-    fn new(layout: FullLayout<'tcx>) -> ArgType<'tcx> {
+    fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
         ArgType {
             kind: ArgKind::Direct,
             layout,
@@ -610,7 +610,7 @@ impl<'a, 'tcx> FnType<'tcx> {
         let fn_ty = instance_ty(ccx.tcx(), &instance);
         let sig = ty_fn_sig(ccx, fn_ty);
         let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig);
-        Self::new(ccx, sig, &[])
+        FnType::new(ccx, sig, &[])
     }
 
     pub fn new(ccx: &CrateContext<'a, 'tcx>,
diff --git a/src/librustc_trans/base.rs b/src/librustc_trans/base.rs
index 545e986d7d1..2f252c5e55e 100644
--- a/src/librustc_trans/base.rs
+++ b/src/librustc_trans/base.rs
@@ -28,6 +28,7 @@ use super::ModuleSource;
 use super::ModuleTranslation;
 use super::ModuleKind;
 
+use abi;
 use assert_module_sources;
 use back::link;
 use back::symbol_export;
@@ -40,7 +41,7 @@ use rustc::middle::lang_items::StartFnLangItem;
 use rustc::middle::trans::{Linkage, Visibility, Stats};
 use rustc::middle::cstore::{EncodedMetadata, EncodedMetadataHashes};
 use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::{self, Align, FullLayout, LayoutOf};
+use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
 use rustc::ty::maps::Providers;
 use rustc::dep_graph::{DepNode, DepKind, DepConstructor};
 use rustc::middle::cstore::{self, LinkMeta, LinkagePreference};
@@ -68,7 +69,7 @@ use symbol_names_test;
 use time_graph;
 use trans_item::{TransItem, BaseTransItemExt, TransItemExt, DefPathBasedNames};
 use type_::Type;
-use type_of::{self, LayoutLlvmExt};
+use type_of::LayoutLlvmExt;
 use rustc::util::nodemap::{NodeSet, FxHashMap, FxHashSet, DefIdSet};
 use CrateInfo;
 
@@ -203,8 +204,10 @@ pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
             old_info.expect("unsized_info: missing old info for trait upcast")
         }
         (_, &ty::TyDynamic(ref data, ..)) => {
+            let vtable_ptr = ccx.layout_of(ccx.tcx().mk_mut_ptr(target))
+                .field(ccx, abi::FAT_PTR_EXTRA);
             consts::ptrcast(meth::get_vtable(ccx, source, data.principal()),
-                            Type::vtable_ptr(ccx))
+                            vtable_ptr.llvm_type(ccx))
         }
         _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
                  source, target),
    }
}
@@ -255,8 +258,8 @@ pub fn coerce_unsized_into<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
                     // So we need to pointercast the base to ensure
                     // the types match up.
-                    let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, dst_ty);
-                    (bcx.pointercast(base, llcast_ty), info)
+                    let thin_ptr = dst.layout.field(bcx.ccx, abi::FAT_PTR_ADDR);
+                    (bcx.pointercast(base, thin_ptr.llvm_type(bcx.ccx)), info)
                 }
                 OperandValue::Immediate(base) => {
                     unsize_thin_ptr(bcx, base, src_ty, dst_ty)
@@ -371,7 +374,7 @@ pub fn from_immediate(bcx: &Builder, val: ValueRef) -> ValueRef {
     }
 }
 
-pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::FullLayout) -> ValueRef {
+pub fn to_immediate(bcx: &Builder, val: ValueRef, layout: layout::TyLayout) -> ValueRef {
     if let layout::Abi::Scalar(layout::Int(layout::I1, _)) = layout.abi {
         bcx.trunc(val, Type::i1(bcx.ccx))
     } else {
@@ -400,7 +403,7 @@ pub fn memcpy_ty<'a, 'tcx>(
     bcx: &Builder<'a, 'tcx>,
     dst: ValueRef,
     src: ValueRef,
-    layout: FullLayout<'tcx>,
+    layout: TyLayout<'tcx>,
     align: Option<Align>,
 ) {
     let ccx = bcx.ccx;
diff --git a/src/librustc_trans/cabi_s390x.rs b/src/librustc_trans/cabi_s390x.rs
index 2766edb59c1..ed598e0a86b 100644
--- a/src/librustc_trans/cabi_s390x.rs
+++ b/src/librustc_trans/cabi_s390x.rs
@@ -14,7 +14,7 @@
 use abi::{FnType, ArgType, LayoutExt, Reg};
 use context::CrateContext;
 
-use rustc::ty::layout::{self, FullLayout};
+use rustc::ty::layout::{self, TyLayout};
 
 fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
     if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 {
@@ -25,7 +25,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
 }
 
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                  layout: FullLayout<'tcx>) -> bool {
+                                  layout: TyLayout<'tcx>) -> bool {
     match layout.abi {
         layout::Abi::Scalar(layout::F32) |
         layout::Abi::Scalar(layout::F64) => true,
diff --git a/src/librustc_trans/cabi_x86.rs b/src/librustc_trans/cabi_x86.rs
index 7d3621d53e0..26f130ec755 100644
--- a/src/librustc_trans/cabi_x86.rs
+++ b/src/librustc_trans/cabi_x86.rs
@@ -11,7 +11,7 @@
 use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind};
 use common::CrateContext;
 
-use rustc::ty::layout::{self, FullLayout};
+use rustc::ty::layout::{self, TyLayout};
 
 #[derive(PartialEq)]
 pub enum Flavor {
@@ -20,7 +20,7 @@ pub enum Flavor {
 }
 
 fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                  layout: FullLayout<'tcx>) -> bool {
+                                  layout: TyLayout<'tcx>) -> bool {
     match layout.abi {
         layout::Abi::Scalar(layout::F32) |
         layout::Abi::Scalar(layout::F64) => true,
diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs
index 00e8562c2a1..45f2b39b982 100644
--- a/src/librustc_trans/cabi_x86_64.rs
+++ b/src/librustc_trans/cabi_x86_64.rs
@@ -14,7 +14,7 @@
 use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind};
 use context::CrateContext;
 
-use rustc::ty::layout::{self, Layout, FullLayout, Size};
+use rustc::ty::layout::{self, Layout, TyLayout, Size};
 
 #[derive(Clone, Copy, PartialEq, Debug)]
 enum Class {
@@ -53,7 +53,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
 }
 
 fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                      layout: FullLayout<'tcx>,
+                      layout: TyLayout<'tcx>,
                       cls: &mut [Class],
                       off: Size)
                       -> Result<(), Memory> {
@@ -90,7 +90,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
        // FIXME(eddyb) have to work around Rust enums for now.
        // Fix is either guarantee no data where there is no field,
        // by putting variants in fields, or be more clever.
-        match *layout.layout {
+        match layout.layout {
            Layout::General { .. } |
            Layout::NullablePointer { .. } => return Err(Memory),
            _ => {}
        }
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index ed8f6583406..7ccac606923 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -54,20 +54,11 @@ pub fn type_is_fat_ptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ->
     }
 }
 
-pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
-    let layout = ccx.layout_of(ty);
-    match layout.abi {
-        layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true,
-
-        layout::Abi::Aggregate { .. } => layout.is_zst()
-    }
-}
-
 /// Returns true if the type is represented as a pair of immediates.
 pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
 {
     let layout = ccx.layout_of(ty);
-    match *layout.fields {
+    match layout.fields {
         layout::FieldPlacement::Arbitrary { .. } => {
             // There must be only 2 fields.
             if layout.fields.count() != 2 {
@@ -75,8 +66,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
             }
 
             // The two fields must be both immediates.
-            type_is_immediate(ccx, layout.field(ccx, 0).ty) &&
-            type_is_immediate(ccx, layout.field(ccx, 1).ty)
+            layout.field(ccx, 0).is_llvm_immediate() &&
+            layout.field(ccx, 1).is_llvm_immediate()
         }
         _ => false
     }
@@ -256,16 +247,7 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef {
     let len = s.len();
     let cs = consts::ptrcast(C_cstr(cx, s, false),
         cx.layout_of(cx.tcx().mk_str()).llvm_type(cx).ptr_to());
-    let empty = C_array(Type::i8(cx), &[]);
-    assert_eq!(abi::FAT_PTR_ADDR, 0);
-    assert_eq!(abi::FAT_PTR_EXTRA, 1);
-    C_named_struct(cx.str_slice_type(), &[
-        empty,
-        cs,
-        empty,
-        C_usize(cx, len as u64),
-        empty
-    ])
+    C_fat_ptr(cx, cs, C_usize(cx, len as u64))
 }
 
 pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef {
@@ -293,12 +275,6 @@ pub fn C_struct_in_context(llcx: ContextRef, elts: &[ValueRef], packed: bool) ->
     }
 }
 
-pub fn C_named_struct(t: Type, elts: &[ValueRef]) -> ValueRef {
-    unsafe {
-        llvm::LLVMConstNamedStruct(t.to_ref(), elts.as_ptr(), elts.len() as c_uint)
-    }
-}
-
 pub fn C_array(ty: Type, elts: &[ValueRef]) -> ValueRef {
     unsafe {
         return llvm::LLVMConstArray(ty.to_ref(), elts.as_ptr(), elts.len() as c_uint);
diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs
index 555acaad505..83efe6b7958 100644
--- a/src/librustc_trans/context.rs
+++ b/src/librustc_trans/context.rs
@@ -24,14 +24,13 @@ use monomorphize::Instance;
 use partitioning::CodegenUnit;
 use type_::Type;
-use type_of::LayoutLlvmExt;
 use rustc_data_structures::base_n;
 use rustc::middle::trans::Stats;
 use rustc_data_structures::stable_hasher::StableHashingContextProvider;
 use rustc::session::config::{self, NoDebugInfo};
 use rustc::session::Session;
-use rustc::ty::layout::{LayoutError, LayoutOf, FullLayout};
+use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout};
 use rustc::ty::{self, Ty, TyCtxt};
 use rustc::util::nodemap::FxHashMap;
 use rustc_trans_utils;
@@ -101,9 +100,9 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> {
     /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
     used_statics: RefCell<Vec<ValueRef>>,
 
-    lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
+    lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>>,
+    scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, Type>>,
     isize_ty: Type,
-    str_slice_type: Type,
 
    dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
@@ -378,8 +377,8 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
             statics_to_rauw: RefCell::new(Vec::new()),
             used_statics: RefCell::new(Vec::new()),
             lltypes: RefCell::new(FxHashMap()),
+            scalar_lltypes: RefCell::new(FxHashMap()),
             isize_ty: Type::from_ref(ptr::null_mut()),
-            str_slice_type: Type::from_ref(ptr::null_mut()),
             dbg_cx,
             eh_personality: Cell::new(None),
             eh_unwind_resume: Cell::new(None),
@@ -389,28 +388,19 @@ impl<'a, 'tcx> LocalCrateContext<'a, 'tcx> {
             placeholder: PhantomData,
         };
 
-        let (isize_ty, str_slice_ty, mut local_ccx) = {
+        let (isize_ty, mut local_ccx) = {
             // Do a little dance to create a dummy CrateContext, so we can
             // create some things in the LLVM module of this codegen unit
             let mut local_ccxs = vec![local_ccx];
-            let (isize_ty, str_slice_ty) = {
+            let isize_ty = {
                 let dummy_ccx = LocalCrateContext::dummy_ccx(shared,
                                                              local_ccxs.as_mut_slice());
-                let mut str_slice_ty = Type::named_struct(&dummy_ccx, "str_slice");
-                str_slice_ty.set_struct_body(&[
-                    Type::array(&Type::i8(&dummy_ccx), 0),
-                    dummy_ccx.layout_of(shared.tcx.mk_str()).llvm_type(&dummy_ccx).ptr_to(),
-                    Type::array(&Type::i8(&dummy_ccx), 0),
-                    Type::isize(&dummy_ccx),
-                    Type::array(&Type::i8(&dummy_ccx), 0)
-                ], false);
-                (Type::isize(&dummy_ccx), str_slice_ty)
+                Type::isize(&dummy_ccx)
             };
-            (isize_ty, str_slice_ty, local_ccxs.pop().unwrap())
+            (isize_ty, local_ccxs.pop().unwrap())
         };
 
         local_ccx.isize_ty = isize_ty;
-        local_ccx.str_slice_type = str_slice_ty;
 
         local_ccx
     }
@@ -515,10 +505,14 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         &self.local().used_statics
     }
 
-    pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
+    pub fn lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), Type>> {
         &self.local().lltypes
     }
 
+    pub fn scalar_lltypes<'a>(&'a self) -> &'a RefCell<FxHashMap<Ty<'tcx>, Type>> {
+        &self.local().scalar_lltypes
+    }
+
     pub fn stats<'a>(&'a self) -> &'a RefCell<Stats> {
         &self.local().stats
     }
@@ -527,10 +521,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         self.local().isize_ty
     }
 
-    pub fn str_slice_type(&self) -> Type {
-        self.local().str_slice_type
-    }
-
     pub fn dbg_cx<'a>(&'a self) -> &'a Option<debuginfo::CrateDebugContext<'tcx>> {
         &self.local().dbg_cx
     }
@@ -669,9 +659,9 @@ impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> {
 }
 
 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> {
-    type FullLayout = FullLayout<'tcx>;
+    type TyLayout = TyLayout<'tcx>;
 
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
         (self.tcx, ty::ParamEnv::empty(traits::Reveal::All))
             .layout_of(ty)
             .unwrap_or_else(|e| match e {
@@ -682,10 +672,10 @@ impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a SharedCrateContext<'a, 'tcx> {
 }
 
 impl<'a, 'tcx> LayoutOf<Ty<'tcx>> for &'a CrateContext<'a, 'tcx> {
-    type FullLayout = FullLayout<'tcx>;
+    type TyLayout = TyLayout<'tcx>;
 
-    fn layout_of(self, ty: Ty<'tcx>) -> Self::FullLayout {
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
         self.shared.layout_of(ty)
     }
 }
diff --git a/src/librustc_trans/debuginfo/metadata.rs b/src/librustc_trans/debuginfo/metadata.rs
index f488ebaa4f5..1bb8aec92e5 100644
--- a/src/librustc_trans/debuginfo/metadata.rs
+++ b/src/librustc_trans/debuginfo/metadata.rs
@@ -32,7 +32,7 @@ use rustc::ty::util::TypeIdHasher;
 use rustc::ich::Fingerprint;
 use common::{self, CrateContext};
 use rustc::ty::{self, AdtKind, Ty};
-use rustc::ty::layout::{self, Align, LayoutOf, Size, FullLayout};
+use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
 use rustc::session::{Session, config};
 use rustc::util::nodemap::FxHashMap;
 use rustc::util::common::path2cstr;
@@ -1052,7 +1052,7 @@ fn prepare_tuple_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 //=-----------------------------------------------------------------------------
 
 struct UnionMemberDescriptionFactory<'tcx> {
-    layout: FullLayout<'tcx>,
+    layout: TyLayout<'tcx>,
     variant: &'tcx ty::VariantDef,
     span: Span,
 }
@@ -1119,7 +1119,7 @@ fn prepare_union_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 // offset of zero bytes).
 struct EnumMemberDescriptionFactory<'tcx> {
     enum_type: Ty<'tcx>,
-    type_rep: FullLayout<'tcx>,
+    type_rep: TyLayout<'tcx>,
     discriminant_type_metadata: Option<DIType>,
     containing_scope: DIScope,
     span: Span,
@@ -1129,7 +1129,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
     fn create_member_descriptions<'a>(&self, cx: &CrateContext<'a, 'tcx>)
                                       -> Vec<MemberDescription> {
         let adt = &self.enum_type.ty_adt_def().unwrap();
-        match *self.type_rep.layout {
+        match self.type_rep.layout {
             layout::Layout::General { ref variants, .. } => {
                 let discriminant_info = RegularDiscriminant(self.discriminant_type_metadata
                     .expect(""));
@@ -1220,7 +1220,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
 // of discriminant instead of us having to recover its path.
 fn compute_field_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                 name: &mut String,
-                                layout: FullLayout<'tcx>,
+                                layout: TyLayout<'tcx>,
                                 offset: Size,
                                 size: Size) {
     for i in 0..layout.fields.count() {
@@ -1300,7 +1300,7 @@ enum EnumDiscriminantInfo {
 // descriptions of the fields of the variant. This is a rudimentary version of a
 // full RecursiveTypeDescription.
 fn describe_enum_variant<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                   layout: layout::FullLayout<'tcx>,
+                                   layout: layout::TyLayout<'tcx>,
                                    variant: &'tcx ty::VariantDef,
                                    discriminant_info: EnumDiscriminantInfo,
                                    containing_scope: DIScope,
@@ -1431,7 +1431,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
 
     let type_rep = cx.layout_of(enum_type);
 
-    let discriminant_type_metadata = match *type_rep.layout {
+    let discriminant_type_metadata = match type_rep.layout {
         layout::Layout::NullablePointer { .. } |
         layout::Layout::Univariant { .. } => None,
         layout::Layout::General { discr, .. } => Some(discriminant_type_metadata(discr)),
diff --git a/src/librustc_trans/meth.rs b/src/librustc_trans/meth.rs
index 8dbef1f8d08..697f4ecd2be 100644
--- a/src/librustc_trans/meth.rs
+++ b/src/librustc_trans/meth.rs
@@ -9,6 +9,7 @@
 // except according to those terms.
 
 use llvm::ValueRef;
+use abi::FnType;
 use callee;
 use common::*;
 use builder::Builder;
@@ -32,10 +33,13 @@ impl<'a, 'tcx> VirtualIndex {
         VirtualIndex(index as u64 + 3)
     }
 
-    pub fn get_fn(self, bcx: &Builder<'a, 'tcx>, llvtable: ValueRef) -> ValueRef {
+    pub fn get_fn(self, bcx: &Builder<'a, 'tcx>,
+                  llvtable: ValueRef,
+                  fn_ty: &FnType<'tcx>) -> ValueRef {
        // Load the data pointer from the object.
        debug!("get_fn({:?}, {:?})", Value(llvtable), self);
 
+        let llvtable = bcx.pointercast(llvtable, fn_ty.llvm_type(bcx.ccx).ptr_to().ptr_to());
         let ptr = bcx.load_nonnull(bcx.inbounds_gep(llvtable, &[C_usize(bcx.ccx, self.0)]), None);
         // Vtable loads are invariant
         bcx.set_invariant_load(ptr);
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index bca33a8c307..93780aefe4d 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -20,6 +20,7 @@ use rustc::mir::traversal;
 use rustc::ty;
 use rustc::ty::layout::LayoutOf;
 use common;
+use type_of::LayoutLlvmExt;
 use super::MirContext;
 
 pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
@@ -31,21 +32,14 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector {
     for (index, ty) in mir.local_decls.iter().map(|l| l.ty).enumerate() {
         let ty = mircx.monomorphize(&ty);
         debug!("local {} has type {:?}", index, ty);
-        if ty.is_scalar() ||
-            ty.is_box() ||
-            ty.is_region_ptr() ||
-            ty.is_simd() ||
-            mircx.ccx.layout_of(ty).is_zst()
-        {
+        if mircx.ccx.layout_of(ty).is_llvm_immediate() {
             // These sorts of types are immediates that we can store
             // in an ValueRef without an alloca.
-            assert!(common::type_is_immediate(mircx.ccx, ty) ||
-                    common::type_is_fat_ptr(mircx.ccx, ty));
         } else if common::type_is_imm_pair(mircx.ccx, ty) {
             // We allow pairs and uses of any of their 2 fields.
         } else {
             // These sorts of types require an alloca. Note that
-            // type_is_immediate() may *still* be true, particularly
+            // is_llvm_immediate() may *still* be true, particularly
             // for newtypes, but we currently force some types
             // (e.g. structs) into an alloca unconditionally, just so
             // that we don't have to deal with having two pathways
@@ -179,9 +173,9 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
             LvalueContext::StorageLive |
             LvalueContext::StorageDead |
             LvalueContext::Validate |
-            LvalueContext::Inspect |
             LvalueContext::Consume => {}
 
+            LvalueContext::Inspect |
             LvalueContext::Store |
             LvalueContext::Borrow { .. } |
             LvalueContext::Projection(..) => {
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 401d4d52216..0528bf972de 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -274,13 +274,22 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
 
                 let lvalue = self.trans_lvalue(&bcx, location);
-                let fn_ty = FnType::of_instance(bcx.ccx, &drop_fn);
-                let (drop_fn, need_extra) = match ty.sty {
-                    ty::TyDynamic(..) => (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra),
-                                          false),
-                    _ => (callee::get_fn(bcx.ccx, drop_fn), lvalue.has_extra())
+                let mut args: &[_] = &[lvalue.llval, lvalue.llextra];
+                args = &args[..1 + lvalue.has_extra() as usize];
+                let (drop_fn, fn_ty) = match ty.sty {
+                    ty::TyDynamic(..) => {
+                        let fn_ty = common::instance_ty(bcx.ccx.tcx(), &drop_fn);
+                        let sig = common::ty_fn_sig(bcx.ccx, fn_ty);
+                        let sig = bcx.tcx().erase_late_bound_regions_and_normalize(&sig);
+                        let fn_ty = FnType::new_vtable(bcx.ccx, sig, &[]);
+                        args = &args[..1];
+                        (meth::DESTRUCTOR.get_fn(&bcx, lvalue.llextra, &fn_ty), fn_ty)
+                    }
+                    _ => {
+                        (callee::get_fn(bcx.ccx, drop_fn),
+                         FnType::of_instance(bcx.ccx, &drop_fn))
+                    }
                 };
-                let args = &[lvalue.llval, lvalue.llextra][..1 + need_extra as usize];
                 do_call(self, bcx, fn_ty, drop_fn, args,
                         Some((ReturnDest::Nothing, target)), unwind);
@@ -561,15 +570,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             (&args[..], None)
         };
 
-        for (idx, arg) in first_args.iter().enumerate() {
+        for (i, arg) in first_args.iter().enumerate() {
             let mut op = self.trans_operand(&bcx, arg);
-            if idx == 0 {
+            if i == 0 {
                 if let Pair(_, meta) = op.val {
                     if let Some(ty::InstanceDef::Virtual(_, idx)) = def {
-                        let llmeth = meth::VirtualIndex::from_index(idx)
-                            .get_fn(&bcx, meta);
-                        let llty = fn_ty.llvm_type(bcx.ccx).ptr_to();
-                        llfn = Some(bcx.pointercast(llmeth, llty));
+                        llfn = Some(meth::VirtualIndex::from_index(idx)
+                            .get_fn(&bcx, meta, &fn_ty));
                     }
                 }
             }
@@ -582,7 +589,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 op.val = Ref(tmp.llval, tmp.alignment);
             }
 
-            self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[idx]);
+            self.trans_argument(&bcx, op, &mut llargs, &fn_ty.args[i]);
         }
         if let Some(tup) = untuple {
             self.trans_arguments_untupled(&bcx, tup, &mut llargs,
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index 94651185686..cc6b84a6715 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -32,7 +32,7 @@ use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u
 use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
 use common::const_to_opt_u128;
 use consts;
-use type_of::{self, LayoutLlvmExt};
+use type_of::LayoutLlvmExt;
 use type_::Type;
 use value::Value;
 
@@ -145,7 +145,7 @@ impl<'a, 'tcx> Const<'tcx> {
         let val = if llty == llvalty && common::type_is_imm_pair(ccx, self.ty) {
             let (a, b) = self.get_pair(ccx);
             OperandValue::Pair(a, b)
-        } else if llty == llvalty && common::type_is_immediate(ccx, self.ty) {
+        } else if llty == llvalty && ccx.layout_of(self.ty).is_llvm_immediate() {
            // If the types match, we can use the value directly.
            OperandValue::Immediate(self.llval)
@@ -677,11 +677,12 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                 }
                 C_fat_ptr(self.ccx, base, info)
             }
-            mir::CastKind::Misc if common::type_is_immediate(self.ccx, operand.ty) => {
-                debug_assert!(common::type_is_immediate(self.ccx, cast_ty));
+            mir::CastKind::Misc if self.ccx.layout_of(operand.ty).is_llvm_immediate() => {
                 let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
                 let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
-                let ll_t_out = self.ccx.layout_of(cast_ty).immediate_llvm_type(self.ccx);
+                let cast_layout = self.ccx.layout_of(cast_ty);
+                assert!(cast_layout.is_llvm_immediate());
+                let ll_t_out = cast_layout.immediate_llvm_type(self.ccx);
                 let llval = operand.llval;
                 let signed = match self.ccx.layout_of(operand.ty).abi {
                     layout::Abi::Scalar(layout::Int(_, signed)) => signed,
@@ -728,8 +729,10 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                 if common::type_is_fat_ptr(self.ccx, operand.ty) {
                     let (data_ptr, meta) = operand.get_fat_ptr(self.ccx);
                     if common::type_is_fat_ptr(self.ccx, cast_ty) {
-                        let llcast_ty = type_of::fat_ptr_base_ty(self.ccx, cast_ty);
-                        let data_cast = consts::ptrcast(data_ptr, llcast_ty);
+                        let thin_ptr = self.ccx.layout_of(cast_ty)
+                            .field(self.ccx, abi::FAT_PTR_ADDR);
+                        let data_cast = consts::ptrcast(data_ptr,
+                                                        thin_ptr.llvm_type(self.ccx));
                         C_fat_ptr(self.ccx, data_cast, meta)
                     } else { // cast to thin-ptr
                         // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                         // pointer-cast of that pointer to desired pointer type.
@@ -1091,7 +1094,7 @@ fn trans_const_adt<'a, 'tcx>(
         mir::AggregateKind::Adt(_, index, _, _) => index,
         _ => 0,
     };
-    match *l.layout {
+    match l.layout {
         layout::Layout::General { .. } => {
             let discr = match *kind {
                 mir::AggregateKind::Adt(adt_def, _, _, _) => {
@@ -1147,7 +1150,7 @@ fn trans_const_adt<'a, 'tcx>(
 /// a two-element struct will locate it at offset 4, and accesses to it
 /// will read the wrong memory.
 fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                layout: layout::FullLayout<'tcx>,
+                                layout: layout::TyLayout<'tcx>,
                                 vals: &[Const<'tcx>],
                                 discr: Option<Const<'tcx>>)
                                 -> Const<'tcx> {
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index 0732720bd1a..7c0b2748a7f 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -10,7 +10,7 @@
 
 use llvm::{self, ValueRef};
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, Align, FullLayout, LayoutOf};
+use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;
@@ -19,7 +19,7 @@ use base;
 use builder::Builder;
 use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty};
 use consts;
-use type_of::{self, LayoutLlvmExt};
+use type_of::LayoutLlvmExt;
 use type_::Type;
 use value::Value;
 use glue;
@@ -54,8 +54,8 @@ impl ops::BitOr for Alignment {
     }
 }
 
-impl<'a> From<FullLayout<'a>> for Alignment {
-    fn from(layout: FullLayout) -> Self {
+impl<'a> From<TyLayout<'a>> for Alignment {
+    fn from(layout: TyLayout) -> Self {
        if let layout::Abi::Aggregate { packed: true, align, .. } = layout.abi {
} = layout.abi { Alignment::Packed(align) } else { @@ -86,7 +86,7 @@ pub struct LvalueRef<'tcx> { pub llextra: ValueRef, /// Monomorphized type of this lvalue, including variant information - pub layout: FullLayout<'tcx>, + pub layout: TyLayout<'tcx>, /// Whether this lvalue is known to be aligned according to its layout pub alignment: Alignment, @@ -94,7 +94,7 @@ pub struct LvalueRef<'tcx> { impl<'a, 'tcx> LvalueRef<'tcx> { pub fn new_sized(llval: ValueRef, - layout: FullLayout<'tcx>, + layout: TyLayout<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> { LvalueRef { @@ -105,7 +105,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } } - pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: FullLayout<'tcx>, name: &str) + pub fn alloca(bcx: &Builder<'a, 'tcx>, layout: TyLayout<'tcx>, name: &str) -> LvalueRef<'tcx> { debug!("alloca({:?}: {:?})", name, layout); let tmp = bcx.alloca( @@ -114,7 +114,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { } pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef { - if let layout::FieldPlacement::Array { count, .. } = *self.layout.fields { + if let layout::FieldPlacement::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert!(self.has_extra()); assert_eq!(count, 0); @@ -163,7 +163,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { OperandValue::Pair( self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) - } else if common::type_is_immediate(bcx.ccx, self.layout.ty) { + } else if self.layout.is_llvm_immediate() { let mut const_llval = ptr::null_mut(); unsafe { let global = llvm::LLVMIsAGlobalVariable(self.llval); @@ -202,28 +202,15 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let ccx = bcx.ccx; let field = self.layout.field(ccx, ix); let offset = self.layout.fields.offset(ix).bytes(); - let alignment = self.alignment | Alignment::from(self.layout); - // Unions and newtypes only use an offset of 0. - let has_llvm_fields = match *self.layout.fields { - layout::FieldPlacement::Union(_) => false, - layout::FieldPlacement::Array { .. } => true, - layout::FieldPlacement::Arbitrary { .. } => { - match self.layout.abi { - layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => false, - layout::Abi::Aggregate { .. } => true - } - } - }; - let simple = || { LvalueRef { - llval: if has_llvm_fields { - bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) - } else { - assert_eq!(offset, 0); + // Unions and newtypes only use an offset of 0. + llval: if offset == 0 { bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to()) + } else { + bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) }, llextra: if ccx.shared().type_has_metadata(field.ty) { self.llextra @@ -309,7 +296,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { /// Obtain the actual discriminant of a value. pub fn trans_get_discr(self, bcx: &Builder<'a, 'tcx>, cast_to: Ty<'tcx>) -> ValueRef { let cast_to = bcx.ccx.layout_of(cast_to).immediate_llvm_type(bcx.ccx); - match *self.layout.layout { + match self.layout.layout { layout::Layout::Univariant { .. } | layout::Layout::UntaggedUnion { .. } => return C_uint(cast_to, 0), _ => {} @@ -320,7 +307,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { layout::Abi::Scalar(discr) => discr, _ => bug!("discriminant not scalar: {:#?}", discr.layout) }; - let (min, max) = match *self.layout.layout { + let (min, max) = match self.layout.layout { layout::Layout::General { ref discr_range, .. 
} => (discr_range.start, discr_range.end), _ => (0, u64::max_value()), }; @@ -346,7 +333,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { bcx.load(discr.llval, discr.alignment.non_abi()) } }; - match *self.layout.layout { + match self.layout.layout { layout::Layout::General { .. } => { let signed = match discr_scalar { layout::Int(_, signed) => signed, @@ -369,7 +356,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let to = self.layout.ty.ty_adt_def().unwrap() .discriminant_for_variant(bcx.tcx(), variant_index) .to_u128_unchecked() as u64; - match *self.layout.layout { + match self.layout.layout { layout::Layout::General { .. } => { let ptr = self.project_field(bcx, 0); bcx.store(C_int(ptr.layout.llvm_type(bcx.ccx), to as i64), @@ -419,17 +406,9 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let mut downcast = *self; downcast.layout = self.layout.for_variant(variant_index); - // If this is an enum, cast to the appropriate variant struct type. - match *self.layout.layout { - layout::Layout::NullablePointer { .. } | - layout::Layout::General { .. } => { - let variant_ty = Type::struct_(bcx.ccx, - &type_of::struct_llfields(bcx.ccx, downcast.layout), - downcast.layout.is_packed()); - downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); - } - _ => {} - } + // Cast to the appropriate variant struct type. + let variant_ty = downcast.layout.llvm_type(bcx.ccx); + downcast.llval = bcx.pointercast(downcast.llval, variant_ty.ptr_to()); downcast } diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs index 2cad0964484..38719fedede 100644 --- a/src/librustc_trans/mir/mod.rs +++ b/src/librustc_trans/mir/mod.rs @@ -12,18 +12,17 @@ use libc::c_uint; use llvm::{self, ValueRef, BasicBlockRef}; use llvm::debuginfo::DIScope; use rustc::ty::{self, TypeFoldable}; -use rustc::ty::layout::{LayoutOf, FullLayout}; +use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::infer::TransNormalize; use rustc::session::config::FullDebugInfo; use base; use builder::Builder; -use common::{self, CrateContext, Funclet}; +use common::{CrateContext, Funclet}; use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; use monomorphize::Instance; use abi::{ArgAttribute, FnType}; -use type_of::{self, LayoutLlvmExt}; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -85,7 +84,7 @@ pub struct MirContext<'a, 'tcx:'a> { /// directly using an `OperandRef`, which makes for tighter LLVM /// IR. 
The conditions for using an `OperandRef` are as follows: /// - /// - the type of the local must be judged "immediate" by `type_is_immediate` + /// - the type of the local must be judged "immediate" by `is_llvm_immediate` /// - the operand must never be referenced indirectly /// - we should not take its address using the `&` operator /// - nor should it appear in an lvalue path like `tmp.a` @@ -177,7 +176,7 @@ enum LocalRef<'tcx> { } impl<'a, 'tcx> LocalRef<'tcx> { - fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: FullLayout<'tcx>) -> LocalRef<'tcx> { + fn new_operand(ccx: &CrateContext<'a, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'tcx> { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -448,32 +447,14 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, assert!(!a.is_ignore() && a.cast.is_none() && a.pad.is_none()); assert!(!b.is_ignore() && b.cast.is_none() && b.pad.is_none()); - let mut a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + let a = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let mut b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + let b = llvm::get_param(bcx.llfn(), llarg_idx as c_uint); + bcx.set_value_name(b, &(name + ".1")); llarg_idx += 1; - if common::type_is_fat_ptr(bcx.ccx, arg.layout.ty) { - // FIXME(eddyb) As we can't perfectly represent the data and/or - // vtable pointer in a fat pointers in Rust's typesystem, and - // because we split fat pointers into two ArgType's, they're - // not the right type so we have to cast them for now. - let pointee = match arg.layout.ty.sty { - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty, - ty::TyAdt(def, _) if def.is_box() => arg.layout.ty.boxed_ty(), - _ => bug!() - }; - let data_llty = bcx.ccx.layout_of(pointee).llvm_type(bcx.ccx); - let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee); - - a = bcx.pointercast(a, data_llty.ptr_to()); - bcx.set_value_name(a, &(name.clone() + ".ptr")); - b = bcx.pointercast(b, meta_llty); - bcx.set_value_name(b, &(name + ".meta")); - } - return LocalRef::Operand(Some(OperandRef { val: OperandValue::Pair(a, b), layout: arg.layout diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs index d1922f8bf99..5659072fa93 100644 --- a/src/librustc_trans/mir/operand.rs +++ b/src/librustc_trans/mir/operand.rs @@ -10,7 +10,7 @@ use llvm::ValueRef; use rustc::ty; -use rustc::ty::layout::{LayoutOf, FullLayout}; +use rustc::ty::layout::{LayoutOf, TyLayout}; use rustc::mir; use rustc_data_structures::indexed_vec::Idx; @@ -71,7 +71,7 @@ pub struct OperandRef<'tcx> { pub val: OperandValue, // The layout of value, based on its Rust type. - pub layout: FullLayout<'tcx>, + pub layout: TyLayout<'tcx>, } impl<'tcx> fmt::Debug for OperandRef<'tcx> { @@ -82,7 +82,7 @@ impl<'tcx> fmt::Debug for OperandRef<'tcx> { impl<'a, 'tcx> OperandRef<'tcx> { pub fn new_zst(ccx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>) -> OperandRef<'tcx> { + layout: TyLayout<'tcx>) -> OperandRef<'tcx> { assert!(layout.is_zst()); let llty = layout.llvm_type(ccx); // FIXME(eddyb) ZSTs should always be immediate, not pairs. 
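With the data half of a fat pointer now typed as a zero-length-array
pointer, the two halves of such an argument no longer need the
pointercasts deleted above: they are taken as-is and simply named
`%x.0` and `%x.1`. As a minimal sketch (function name illustrative,
attributes omitted; the exact expectations live in the codegen tests
further below, with `[[USIZE]]` standing for the target's
pointer-sized integer):

    #[no_mangle]
    pub fn take_slice(x: &[u16]) {}

    // is now declared along the lines of:
    //   define void @take_slice([0 x i16]* %x.0, [[USIZE]] %x.1)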
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs index c7cb69339f7..b68cd3a6ae5 100644 --- a/src/librustc_trans/mir/rvalue.rs +++ b/src/librustc_trans/mir/rvalue.rs @@ -18,6 +18,7 @@ use rustc_apfloat::{ieee, Float, Status, Round}; use rustc_const_math::MAX_F32_PLUS_HALF_ULP; use std::{u128, i128}; +use abi; use base; use builder::Builder; use callee; @@ -26,7 +27,7 @@ use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_i use consts; use monomorphize; use type_::Type; -use type_of::{self, LayoutLlvmExt}; +use type_of::LayoutLlvmExt; use value::Value; use super::{MirContext, LocalRef}; @@ -234,8 +235,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { // &'a fmt::Debug+Send => &'a fmt::Debug, // So we need to pointercast the base to ensure // the types match up. - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); - let lldata = bcx.pointercast(lldata, llcast_ty); + let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR); + let lldata = bcx.pointercast(lldata, thin_ptr.llvm_type(bcx.ccx)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { @@ -253,8 +254,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { mir::CastKind::Misc if common::type_is_fat_ptr(bcx.ccx, operand.layout.ty) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { if common::type_is_fat_ptr(bcx.ccx, cast.ty) { - let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx, cast.ty); - let data_cast = bcx.pointercast(data_ptr, llcast_ty); + let thin_ptr = cast.field(bcx.ccx, abi::FAT_PTR_ADDR); + let data_cast = bcx.pointercast(data_ptr, + thin_ptr.llvm_type(bcx.ccx)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and @@ -268,7 +270,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { } } mir::CastKind::Misc => { - debug_assert!(common::type_is_immediate(bcx.ccx, cast.ty)); + assert!(cast.is_llvm_immediate()); let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); @@ -276,7 +278,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> { let ll_t_out = cast.immediate_llvm_type(bcx.ccx); let llval = operand.immediate(); - if let Layout::General { ref discr_range, .. } = *operand.layout.layout { + if let Layout::General { ref discr_range, .. 
} = operand.layout.layout { if discr_range.end > discr_range.start { // We want `table[e as usize]` to not // have bound checks, and this is the most diff --git a/src/librustc_trans/type_.rs b/src/librustc_trans/type_.rs index dbdc8919da9..53aaed15783 100644 --- a/src/librustc_trans/type_.rs +++ b/src/librustc_trans/type_.rs @@ -207,10 +207,6 @@ impl Type { ty!(llvm::LLVMVectorType(ty.to_ref(), len as c_uint)) } - pub fn vtable_ptr(ccx: &CrateContext) -> Type { - Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to() - } - pub fn kind(&self) -> TypeKind { unsafe { llvm::LLVMRustGetTypeKind(self.to_ref()) diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs index 60c2b539739..77cc3897c9b 100644 --- a/src/librustc_trans/type_of.rs +++ b/src/librustc_trans/type_of.rs @@ -11,131 +11,68 @@ use abi::FnType; use common::*; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, FullLayout}; +use rustc::ty::layout::{self, HasDataLayout, Align, LayoutOf, Size, TyLayout}; use trans_item::DefPathBasedNames; use type_::Type; -use syntax::ast; +use std::fmt::Write; -pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - match ty.sty { - ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) | - ty::TyRawPtr(ty::TypeAndMut { ty: t, .. }) if ccx.shared().type_has_metadata(t) => { - ccx.layout_of(t).llvm_type(ccx).ptr_to() - } - ty::TyAdt(def, _) if def.is_box() => { - ccx.layout_of(ty.boxed_ty()).llvm_type(ccx).ptr_to() - } - _ => bug!("expected fat ptr ty but got {:?}", ty) - } -} - -pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type { - let unsized_part = ccx.tcx().struct_tail(ty); - match unsized_part.sty { - ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => { - Type::uint_from_ty(ccx, ast::UintTy::Us) - } - ty::TyDynamic(..) => Type::vtable_ptr(ccx), - _ => bug!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}", - unsized_part, ty) - } -} - -fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - ty: Ty<'tcx>, - defer: &mut Option<(Type, FullLayout<'tcx>)>) +fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>, + defer: &mut Option<(Type, TyLayout<'tcx>)>) -> Type { - let ptr_ty = |ty: Ty<'tcx>| { - if cx.shared().type_has_metadata(ty) { - if let ty::TyStr = ty.sty { - // This means we get a nicer name in the output (str is always - // unsized). - cx.str_slice_type() - } else { - let ptr_ty = cx.layout_of(ty).llvm_type(cx).ptr_to(); - let info_ty = unsized_info_ty(cx, ty); - Type::struct_(cx, &[ - Type::array(&Type::i8(cx), 0), - ptr_ty, - Type::array(&Type::i8(cx), 0), - info_ty, - Type::array(&Type::i8(cx), 0) - ], false) - } - } else { - cx.layout_of(ty).llvm_type(cx).ptr_to() - } - }; - match ty.sty { - ty::TyRef(_, ty::TypeAndMut{ty, ..}) | - ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => { - return ptr_ty(ty); + match layout.abi { + layout::Abi::Scalar(_) => bug!("handled elsewhere"), + layout::Abi::Vector { .. } => { + return Type::vector(&layout.field(ccx, 0).llvm_type(ccx), + layout.fields.count() as u64); } - ty::TyAdt(def, _) if def.is_box() => { - return ptr_ty(ty.boxed_ty()); - } - ty::TyFnPtr(sig) => { - let sig = cx.tcx().erase_late_bound_regions_and_normalize(&sig); - return FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to(); - } - _ => {} + layout::Abi::Aggregate { .. 
} => {} } - let layout = cx.layout_of(ty); - if let layout::Abi::Scalar(value) = layout.abi { - let llty = match value { - layout::Int(layout::I1, _) => Type::i8(cx), - layout::Int(i, _) => Type::from_integer(cx, i), - layout::F32 => Type::f32(cx), - layout::F64 => Type::f64(cx), - layout::Pointer => { - cx.layout_of(layout::Pointer.to_ty(cx.tcx())).llvm_type(cx) - } - }; - return llty; - } - - if let layout::Abi::Vector { .. } = layout.abi { - return Type::vector(&layout.field(cx, 0).llvm_type(cx), - layout.fields.count() as u64); - } - - let name = match ty.sty { - ty::TyClosure(..) | ty::TyGenerator(..) | ty::TyAdt(..) => { + let name = match layout.ty.sty { + ty::TyClosure(..) | + ty::TyGenerator(..) | + ty::TyAdt(..) | + ty::TyDynamic(..) | + ty::TyForeign(..) | + ty::TyStr => { let mut name = String::with_capacity(32); - let printer = DefPathBasedNames::new(cx.tcx(), true, true); - printer.push_type_name(ty, &mut name); + let printer = DefPathBasedNames::new(ccx.tcx(), true, true); + printer.push_type_name(layout.ty, &mut name); + if let (&ty::TyAdt(def, _), Some(v)) = (&layout.ty.sty, layout.variant_index) { + write!(&mut name, "::{}", def.variants[v].name).unwrap(); + } Some(name) } _ => None }; - match *layout.fields { + match layout.fields { layout::FieldPlacement::Union(_) => { - let size = layout.size(cx).bytes(); - let fill = Type::array(&Type::i8(cx), size); + let size = layout.size(ccx).bytes(); + let fill = Type::array(&Type::i8(ccx), size); match name { None => { - Type::struct_(cx, &[fill], layout.is_packed()) + Type::struct_(ccx, &[fill], layout.is_packed()) } Some(ref name) => { - let mut llty = Type::named_struct(cx, name); + let mut llty = Type::named_struct(ccx, name); llty.set_struct_body(&[fill], layout.is_packed()); llty } } } layout::FieldPlacement::Array { count, .. } => { - Type::array(&layout.field(cx, 0).llvm_type(cx), count) + Type::array(&layout.field(ccx, 0).llvm_type(ccx), count) } layout::FieldPlacement::Arbitrary { .. 
} => { match name { None => { - Type::struct_(cx, &struct_llfields(cx, layout), layout.is_packed()) + Type::struct_(ccx, &struct_llfields(ccx, layout), layout.is_packed()) } Some(ref name) => { - let llty = Type::named_struct(cx, name); + let llty = Type::named_struct(ccx, name); *defer = Some((llty, layout)); llty } @@ -144,37 +81,37 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, } } -pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, - layout: FullLayout<'tcx>) -> Vec { +fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, + layout: TyLayout<'tcx>) -> Vec { debug!("struct_llfields: {:#?}", layout); - let align = layout.align(cx); - let size = layout.size(cx); + let align = layout.align(ccx); + let size = layout.size(ccx); let field_count = layout.fields.count(); let mut offset = Size::from_bytes(0); let mut result: Vec = Vec::with_capacity(1 + field_count * 2); for i in layout.fields.index_by_increasing_offset() { - let field = layout.field(cx, i); + let field = layout.field(ccx, i); let target_offset = layout.fields.offset(i as usize); debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?}", i, field, offset, target_offset); assert!(target_offset >= offset); let padding = target_offset - offset; - result.push(Type::array(&Type::i8(cx), padding.bytes())); + result.push(Type::array(&Type::i8(ccx), padding.bytes())); debug!(" padding before: {:?}", padding); - result.push(field.llvm_type(cx)); + result.push(field.llvm_type(ccx)); if layout.is_packed() { assert_eq!(padding.bytes(), 0); } else { - let field_align = field.align(cx); + let field_align = field.align(ccx); assert!(field_align.abi() <= align.abi(), "non-packed type has field with larger align ({}): {:#?}", field_align.abi(), layout); } - offset = target_offset + field.size(cx); + offset = target_offset + field.size(ccx); } if !layout.is_unsized() && field_count > 0 { if offset > size { @@ -184,7 +121,7 @@ pub fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, let padding = size - offset; debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, size); - result.push(Type::array(&Type::i8(cx), padding.bytes())); + result.push(Type::array(&Type::i8(ccx), padding.bytes())); assert!(result.len() == 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", @@ -210,13 +147,22 @@ impl<'a, 'tcx> CrateContext<'a, 'tcx> { } pub trait LayoutLlvmExt<'tcx> { + fn is_llvm_immediate(&self) -> bool; fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type; fn over_align(&self, ccx: &CrateContext) -> Option; fn llvm_field_index(&self, index: usize) -> u64; } -impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { +impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { + fn is_llvm_immediate(&self) -> bool { + match self.abi { + layout::Abi::Scalar(_) | layout::Abi::Vector { .. } => true, + + layout::Abi::Aggregate { .. } => self.is_zst() + } + } + /// Get the LLVM type corresponding to a Rust type, i.e. `rustc::ty::Ty`. /// The pointee type of the pointer in `LvalueRef` is always this type. /// For sized types, it is also the right LLVM type for an `alloca` @@ -229,8 +175,42 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. 
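    ///
    /// As a worked example (a sketch; `#[repr(C)]` keeps the source
    /// field order, and the target is assumed at least 32-bit):
    ///
    ///     #[repr(C)] struct S { a: u8, b: u32 }
    ///     // lowers to { [0 x i8], i8, [3 x i8], i32, [0 x i8] }:
    ///     // padding arrays sit before, between and after the two
    ///     // fields, so field `i` lives at struct index `1 + 2 * i`
    ///     // (`a` at index 1, `b` at index 3).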
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type { + if let layout::Abi::Scalar(value) = self.abi { + // Use a different cache for scalars because pointers to DSTs + // can be either fat or thin (data pointers of fat pointers). + if let Some(&llty) = ccx.scalar_lltypes().borrow().get(&self.ty) { + return llty; + } + let llty = match value { + layout::Int(layout::I1, _) => Type::i8(ccx), + layout::Int(i, _) => Type::from_integer(ccx, i), + layout::F32 => Type::f32(ccx), + layout::F64 => Type::f64(ccx), + layout::Pointer => { + let pointee = match self.ty.sty { + ty::TyRef(_, ty::TypeAndMut { ty, .. }) | + ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => { + ccx.layout_of(ty).llvm_type(ccx) + } + ty::TyAdt(def, _) if def.is_box() => { + ccx.layout_of(self.ty.boxed_ty()).llvm_type(ccx) + } + ty::TyFnPtr(sig) => { + let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&sig); + FnType::new(ccx, sig, &[]).llvm_type(ccx) + } + _ => Type::i8(ccx) + }; + pointee.ptr_to() + } + }; + ccx.scalar_lltypes().borrow_mut().insert(self.ty, llty); + return llty; + } + + // Check the cache. - if let Some(&llty) = ccx.lltypes().borrow().get(&self.ty) { + if let Some(&llty) = ccx.lltypes().borrow().get(&(self.ty, self.variant_index)) { return llty; } @@ -244,13 +224,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { let mut defer = None; let llty = if self.ty != normal_ty { - ccx.layout_of(normal_ty).llvm_type(ccx) + let mut layout = ccx.layout_of(normal_ty); + if let Some(v) = self.variant_index { + layout = layout.for_variant(v); + } + layout.llvm_type(ccx) } else { - uncached_llvm_type(ccx, self.ty, &mut defer) + uncached_llvm_type(ccx, *self, &mut defer) }; debug!("--> mapped {:#?} to llty={:?}", self, llty); - ccx.lltypes().borrow_mut().insert(self.ty, llty); + ccx.lltypes().borrow_mut().insert((self.ty, self.variant_index), llty); if let Some((mut llty, layout)) = defer { llty.set_struct_body(&struct_llfields(ccx, layout), layout.is_packed()) @@ -279,11 +263,11 @@ impl<'tcx> LayoutLlvmExt<'tcx> for FullLayout<'tcx> { fn llvm_field_index(&self, index: usize) -> u64 { if let layout::Abi::Scalar(_) = self.abi { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self); + bug!("TyLayout::llvm_field_index({:?}): not applicable", self); } - match *self.fields { + match self.fields { layout::FieldPlacement::Union(_) => { - bug!("FullLayout::llvm_field_index({:?}): not applicable", self) + bug!("TyLayout::llvm_field_index({:?}): not applicable", self) } layout::FieldPlacement::Array { .. 
} => { diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index 8a680f1c9d6..525a1f5310c 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -24,10 +24,9 @@ pub fn helper(_: usize) { pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] { // We used to generate an extra alloca and memcpy for the block's trailing expression value, so // check that we copy directly to the return value slot -// CHECK: %x.ptr = bitcast i8* %0 to [0 x i8]* -// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.ptr, 1 -// CHECK: %2 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1, [[USIZE]] %x.meta, 3 -// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %2 +// CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1 +// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3 +// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1 { x } } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 5d073670d86..05682a8efae 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -97,43 +97,43 @@ pub fn struct_return() -> S { pub fn helper(_: usize) { } -// CHECK: @slice(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @slice([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn slice(_: &[u8]) { } -// CHECK: @mutable_slice(i8* nonnull %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @mutable_slice([0 x i8]* nonnull %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` // ... there's this LLVM bug that forces us to not use noalias, see #29485 #[no_mangle] pub fn mutable_slice(_: &mut [u8]) { } -// CHECK: @unsafe_slice(%UnsafeInner* nonnull %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @unsafe_slice([0 x %UnsafeInner]* nonnull %arg0.0, [[USIZE]] %arg0.1) // unsafe interior means this isn't actually readonly and there may be aliases ... 
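// (In the pointer-attribute logic, this corresponds to
// `type_is_freeze` returning false for a pointee containing
// `UnsafeCell`, so neither `readonly` nor, by default, `noalias` is
// emitted for `&[UnsafeInner]`; only `nonnull` remains.)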
#[no_mangle] pub fn unsafe_slice(_: &[UnsafeInner]) { } -// CHECK: @str(i8* noalias nonnull readonly %arg0.ptr, [[USIZE]] %arg0.meta) +// CHECK: @str([0 x i8]* noalias nonnull readonly %arg0.0, [[USIZE]] %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn str(_: &[u8]) { } -// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly) +// CHECK: @trait_borrow(%"core::ops::drop::Drop"* nonnull %arg0.0, {}* noalias nonnull readonly %arg0.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn trait_borrow(_: &Drop) { } -// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly) +// CHECK: @trait_box(%"core::ops::drop::Drop"* noalias nonnull, {}* noalias nonnull readonly) #[no_mangle] pub fn trait_box(_: Box) { } -// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice(i16* noalias nonnull readonly %x.ptr, [[USIZE]] %x.meta) +// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1) #[no_mangle] pub fn return_slice(x: &[u16]) -> &[u16] { x diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index ad799247f59..2ab64fffa3b 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -24,10 +24,10 @@ pub fn helper(_: usize) { pub fn ref_dst(s: &[u8]) { // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy // directly to the alloca for "x" -// CHECK: [[X0:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 1 -// CHECK: store [0 x i8]* %s.ptr, [0 x i8]** [[X0]] +// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]** +// CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]] // CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3 -// CHECK: store [[USIZE]] %s.meta, [[USIZE]]* [[X1]] +// CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]] let x = &*s; &x; // keep variable in an alloca -- cgit 1.4.1-3-g733a5 From f8d5d0c30c32c20163e45c3c1521add198b63afc Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Tue, 3 Oct 2017 10:45:07 +0300 Subject: rustc_trans: compute better align/dereferenceable attributes from pointees. 
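Instead of hard-coding `dereferenceable` from the argument's own size,
pointer attributes are now computed from the pointee's layout: `align`
is emitted alongside `dereferenceable`, and pointers that may
legitimately be null only get the weaker `dereferenceable_or_null`.
The visible effect on an indirect by-value argument, taken from the
codegen test updated below:

    before: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0)
    after:  @indirect_struct(%S* noalias nocapture align 4 dereferenceable(32) %arg0)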
--- src/librustc_llvm/ffi.rs | 6 + src/librustc_trans/abi.rs | 284 +++++++++++++++++++-------------- src/rustllvm/RustWrapper.cpp | 58 +++++++ src/test/codegen/function-arguments.rs | 10 +- src/test/codegen/packed.rs | 2 +- 5 files changed, 237 insertions(+), 123 deletions(-) (limited to 'src/test/codegen/function-arguments.rs') diff --git a/src/librustc_llvm/ffi.rs b/src/librustc_llvm/ffi.rs index fdc27d4e041..f8c71d48255 100644 --- a/src/librustc_llvm/ffi.rs +++ b/src/librustc_llvm/ffi.rs @@ -730,7 +730,9 @@ extern "C" { FunctionTy: TypeRef) -> ValueRef; pub fn LLVMSetFunctionCallConv(Fn: ValueRef, CC: c_uint); + pub fn LLVMRustAddAlignmentAttr(Fn: ValueRef, index: c_uint, bytes: u32); pub fn LLVMRustAddDereferenceableAttr(Fn: ValueRef, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullAttr(Fn: ValueRef, index: c_uint, bytes: u64); pub fn LLVMRustAddFunctionAttribute(Fn: ValueRef, index: c_uint, attr: Attribute); pub fn LLVMRustAddFunctionAttrStringValue(Fn: ValueRef, index: c_uint, @@ -760,7 +762,11 @@ extern "C" { // Operations on call sites pub fn LLVMSetInstructionCallConv(Instr: ValueRef, CC: c_uint); pub fn LLVMRustAddCallSiteAttribute(Instr: ValueRef, index: c_uint, attr: Attribute); + pub fn LLVMRustAddAlignmentCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u32); pub fn LLVMRustAddDereferenceableCallSiteAttr(Instr: ValueRef, index: c_uint, bytes: u64); + pub fn LLVMRustAddDereferenceableOrNullCallSiteAttr(Instr: ValueRef, + index: c_uint, + bytes: u64); // Operations on load/store instructions (only) pub fn LLVMSetVolatile(MemoryAccessInst: ValueRef, volatile: Bool); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index c4b90d94dd4..aaadc7518e5 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -96,20 +96,24 @@ impl ArgAttribute { /// A compact representation of LLVM attributes (at least those relevant for this module) /// that can be manipulated without interacting with LLVM's Attribute machinery. 
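/// (After this change the pointee facts are tracked separately:
/// `pointee_size` drives `dereferenceable` versus
/// `dereferenceable_or_null`, and `pointee_align` drives `align`,
/// replacing the single `dereferenceable_bytes` count below.)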
-#[derive(Copy, Clone, Debug, Default)] +#[derive(Copy, Clone, Debug)] pub struct ArgAttributes { regular: ArgAttribute, - dereferenceable_bytes: u64, + pointee_size: Size, + pointee_align: Option } impl ArgAttributes { - pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { - self.regular = self.regular | attr; - self + fn new() -> Self { + ArgAttributes { + regular: ArgAttribute::default(), + pointee_size: Size::from_bytes(0), + pointee_align: None, + } } - pub fn set_dereferenceable(&mut self, size: Size) -> &mut Self { - self.dereferenceable_bytes = size.bytes(); + pub fn set(&mut self, attr: ArgAttribute) -> &mut Self { + self.regular = self.regular | attr; self } @@ -118,24 +122,52 @@ impl ArgAttributes { } pub fn apply_llfn(&self, idx: AttributePlace, llfn: ValueRef) { + let mut regular = self.regular; unsafe { - self.regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); - if self.dereferenceable_bytes != 0 { - llvm::LLVMRustAddDereferenceableAttr(llfn, - idx.as_uint(), - self.dereferenceable_bytes); + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + llvm::LLVMRustAddDereferenceableAttr(llfn, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentAttr(llfn, + idx.as_uint(), + align.abi() as u32); + } + regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn)); } } pub fn apply_callsite(&self, idx: AttributePlace, callsite: ValueRef) { + let mut regular = self.regular; unsafe { - self.regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); - if self.dereferenceable_bytes != 0 { - llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, - idx.as_uint(), - self.dereferenceable_bytes); + let deref = self.pointee_size.bytes(); + if deref != 0 { + if regular.contains(ArgAttribute::NonNull) { + llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, + idx.as_uint(), + deref); + } else { + llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(callsite, + idx.as_uint(), + deref); + } + regular -= ArgAttribute::NonNull; + } + if let Some(align) = self.pointee_align { + llvm::LLVMRustAddAlignmentCallSiteAttr(callsite, + idx.as_uint(), + align.abi() as u32); } + regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite)); } } } @@ -439,12 +471,20 @@ pub struct ArgType<'tcx> { impl<'a, 'tcx> ArgType<'tcx> { fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> { + let mut attrs = ArgAttributes::new(); + + if let layout::Abi::Scalar(ref scalar) = layout.abi { + if scalar.is_bool() { + attrs.set(ArgAttribute::ZExt); + } + } + ArgType { kind: ArgKind::Direct, layout, cast: None, pad: None, - attrs: ArgAttributes::default(), + attrs, nested: vec![] } } @@ -454,14 +494,16 @@ impl<'a, 'tcx> ArgType<'tcx> { assert_eq!(self.kind, ArgKind::Direct); // Wipe old attributes, likely not valid through indirection. - self.attrs = ArgAttributes::default(); + self.attrs = ArgAttributes::new(); // For non-immediate arguments the callee gets its own copy of // the value on the stack, so there are no aliases. 
It's also // program-invisible so can't possibly capture self.attrs.set(ArgAttribute::NoAlias) .set(ArgAttribute::NoCapture) - .set_dereferenceable(self.layout.size); + .set(ArgAttribute::NonNull); + self.attrs.pointee_size = self.layout.size; + self.attrs.pointee_align = Some(self.layout.align); self.kind = ArgKind::Indirect; } @@ -472,6 +514,22 @@ impl<'a, 'tcx> ArgType<'tcx> { self.kind = ArgKind::Ignore; } + fn safe_pointee(&mut self, layout: TyLayout) { + match self.layout.abi { + layout::Abi::Scalar(layout::Scalar { + value: layout::Pointer, + ref valid_range + }) => { + if valid_range.start > 0 { + self.attrs.set(ArgAttribute::NonNull); + } + self.attrs.pointee_size = layout.size; + self.attrs.pointee_align = Some(layout.align); + } + _ => bug!("ArgType::safe_pointee({:#?}): not a pointer", self.layout) + } + } + pub fn extend_integer_width_to(&mut self, bits: u64) { // Only integers have signedness if let layout::Abi::Scalar(ref scalar) = self.layout.abi { @@ -694,123 +752,115 @@ impl<'a, 'tcx> FnType<'tcx> { _ => false }; - let arg_of = |ty: Ty<'tcx>, is_return: bool| { - let mut arg = ArgType::new(ccx.layout_of(ty)); - if let layout::Abi::Scalar(ref scalar) = arg.layout.abi { - if scalar.is_bool() { - arg.attrs.set(ArgAttribute::ZExt); - } - } - if arg.layout.is_zst() { - // For some forsaken reason, x86_64-pc-windows-gnu - // doesn't ignore zero-sized struct arguments. - // The same is true for s390x-unknown-linux-gnu. - if is_return || rust_abi || - (!win_x64_gnu && !linux_s390x) { - arg.ignore(); - } - } - arg - }; - - let ret_ty = sig.output(); - let mut ret = arg_of(ret_ty, true); - - if !type_is_fat_ptr(ccx, ret_ty) { - // The `noalias` attribute on the return value is useful to a - // function ptr caller. - if ret_ty.is_box() { - // `Box` pointer return values never alias because ownership - // is transferred - ret.attrs.set(ArgAttribute::NoAlias); + // Handle safe Rust thin and fat pointers. + let adjust_for_rust_type = |arg: &mut ArgType<'tcx>, is_return: bool| { + // We only handle thin pointers here. + match arg.layout.abi { + layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {} + _ => return } - // We can also mark the return value as `dereferenceable` in certain cases - match ret_ty.sty { - // These are not really pointers but pairs, (pointer, len) - ty::TyRef(_, ty::TypeAndMut { ty, .. }) => { - ret.attrs.set_dereferenceable(ccx.size_of(ty)); - } - ty::TyAdt(def, _) if def.is_box() => { - ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty())); + let mut ty = arg.layout.ty; + + // FIXME(eddyb) detect more nested cases than `Option<&T>` here. + match arg.layout.variants { + layout::Variants::NicheFilling { dataful_variant, .. } => { + let variant = arg.layout.for_variant(ccx, dataful_variant); + for i in 0..variant.fields.count() { + let field = variant.field(ccx, i); + match field.abi { + layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => { + // We found the pointer field, use its type. + ty = field.ty; + break; + } + _ => {} + } + } } _ => {} } - } - let mut args = Vec::with_capacity(inputs.len() + extra_args.len()); + match ty.sty { + // `Box` pointer parameters never alias because ownership is transferred + ty::TyAdt(def, _) if def.is_box() => { + arg.attrs.set(ArgAttribute::NoAlias); - // Handle safe Rust thin and fat pointers. 
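        // (Illustration of the nullable case this handles, under the
        // niche-filling detection and `safe_pointee` above: for
        // `x: Option<&u64>` the argument is a single pointer scalar
        // whose valid range includes 0, so it gets
        // `dereferenceable_or_null(8)` but no `nonnull`, while a
        // plain `x: &u64` has a valid range starting at 1 and gets
        // `nonnull` plus `dereferenceable(8)`.)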
- let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty { - // `Box` pointer parameters never alias because ownership is transferred - ty::TyAdt(def, _) if def.is_box() => { - arg.attrs.set(ArgAttribute::NoAlias); - Some(ty.boxed_ty()) - } + arg.safe_pointee(ccx.layout_of(ty.boxed_ty())); + } - ty::TyRef(_, mt) => { - // `&mut` pointer parameters never alias other parameters, or mutable global data - // - // `&T` where `T` contains no `UnsafeCell` is immutable, and can be marked as - // both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely - // on memory dependencies rather than pointer equality - let is_freeze = ccx.shared().type_is_freeze(mt.ty); - - let no_alias_is_safe = - if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || - ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { - // Mutable refrences or immutable shared references - mt.mutbl == hir::MutMutable || is_freeze - } else { - // Only immutable shared references - mt.mutbl != hir::MutMutable && is_freeze - }; + ty::TyRef(_, mt) => { + // `&mut` pointer parameters never alias other parameters, + // or mutable global data + // + // `&T` where `T` contains no `UnsafeCell` is immutable, + // and can be marked as both `readonly` and `noalias`, as + // LLVM's definition of `noalias` is based solely on memory + // dependencies rather than pointer equality + let is_freeze = ccx.shared().type_is_freeze(mt.ty); + + let no_alias_is_safe = + if ccx.shared().tcx().sess.opts.debugging_opts.mutable_noalias || + ccx.shared().tcx().sess.panic_strategy() == PanicStrategy::Abort { + // Mutable refrences or immutable shared references + mt.mutbl == hir::MutMutable || is_freeze + } else { + // Only immutable shared references + mt.mutbl != hir::MutMutable && is_freeze + }; - if no_alias_is_safe { - arg.attrs.set(ArgAttribute::NoAlias); - } + if no_alias_is_safe { + arg.attrs.set(ArgAttribute::NoAlias); + } - if mt.mutbl == hir::MutImmutable && is_freeze { - arg.attrs.set(ArgAttribute::ReadOnly); + if mt.mutbl == hir::MutImmutable && is_freeze && !is_return { + arg.attrs.set(ArgAttribute::ReadOnly); + } + + arg.safe_pointee(ccx.layout_of(mt.ty)); } + _ => {} + } - Some(mt.ty) + // HACK(eddyb) LLVM inserts `llvm.assume` calls when inlining functions + // with align attributes, and those calls later block optimizations. + if !is_return { + arg.attrs.pointee_align = None; } - _ => None }; - for ty in inputs.iter().chain(extra_args.iter()) { - let mut arg = arg_of(ty, false); - - if type_is_fat_ptr(ccx, ty) { - let mut data = ArgType::new(arg.layout.field(ccx, 0)); - let mut info = ArgType::new(arg.layout.field(ccx, 1)); - - if let Some(inner) = rust_ptr_attrs(ty, &mut data) { - data.attrs.set(ArgAttribute::NonNull); - if ccx.tcx().struct_tail(inner).is_trait() { - // vtables can be safely marked non-null, readonly - // and noalias. - info.attrs.set(ArgAttribute::NonNull); - info.attrs.set(ArgAttribute::ReadOnly); - info.attrs.set(ArgAttribute::NoAlias); - } - } - // FIXME(eddyb) other ABIs don't have logic for nested. - if rust_abi { - arg.nested = vec![data, info]; + let arg_of = |ty: Ty<'tcx>, is_return: bool| { + let mut arg = ArgType::new(ccx.layout_of(ty)); + if arg.layout.is_zst() { + // For some forsaken reason, x86_64-pc-windows-gnu + // doesn't ignore zero-sized struct arguments. + // The same is true for s390x-unknown-linux-gnu. + if is_return || rust_abi || + (!win_x64_gnu && !linux_s390x) { + arg.ignore(); } + } + + // FIXME(eddyb) other ABIs don't have logic for nested. 
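        // (Concretely: under the Rust ABI a fat pointer such as
        // `x: &[u8]` is passed not as one aggregate but as two
        // nested ArgTypes, one per scalar half, each attributed
        // independently: the data pointer `%x.0` of type
        // `[0 x i8]*` and the length `%x.1` of type `usize`.)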
+ if !is_return && type_is_fat_ptr(ccx, arg.layout.ty) && rust_abi { + arg.nested = vec![ + ArgType::new(arg.layout.field(ccx, 0)), + ArgType::new(arg.layout.field(ccx, 1)) + ]; + adjust_for_rust_type(&mut arg.nested[0], false); + adjust_for_rust_type(&mut arg.nested[1], false); } else { - if let Some(inner) = rust_ptr_attrs(ty, &mut arg) { - arg.attrs.set_dereferenceable(ccx.size_of(inner)); - } + adjust_for_rust_type(&mut arg, is_return); } - args.push(arg); - } + + arg + }; FnType { - args, - ret, + ret: arg_of(sig.output(), true), + args: inputs.iter().chain(extra_args.iter()).map(|ty| { + arg_of(ty, false) + }).collect(), variadic: sig.variadic, cconv, } diff --git a/src/rustllvm/RustWrapper.cpp b/src/rustllvm/RustWrapper.cpp index c8d974febf2..9aa172591b8 100644 --- a/src/rustllvm/RustWrapper.cpp +++ b/src/rustllvm/RustWrapper.cpp @@ -178,6 +178,22 @@ extern "C" void LLVMRustAddCallSiteAttribute(LLVMValueRef Instr, unsigned Index, #endif } +extern "C" void LLVMRustAddAlignmentCallSiteAttr(LLVMValueRef Instr, + unsigned Index, + uint32_t Bytes) { + CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B; + B.addAlignmentAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, B)); +#else + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, + AttributeSet::get(Call->getContext(), Index, B))); +#endif +} + extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr, unsigned Index, uint64_t Bytes) { @@ -194,6 +210,22 @@ extern "C" void LLVMRustAddDereferenceableCallSiteAttr(LLVMValueRef Instr, #endif } +extern "C" void LLVMRustAddDereferenceableOrNullCallSiteAttr(LLVMValueRef Instr, + unsigned Index, + uint64_t Bytes) { + CallSite Call = CallSite(unwrap(Instr)); + AttrBuilder B; + B.addDereferenceableOrNullAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, B)); +#else + Call.setAttributes(Call.getAttributes().addAttributes( + Call->getContext(), Index, + AttributeSet::get(Call->getContext(), Index, B))); +#endif +} + extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index, LLVMRustAttribute RustAttr) { Function *A = unwrap(Fn); @@ -206,6 +238,19 @@ extern "C" void LLVMRustAddFunctionAttribute(LLVMValueRef Fn, unsigned Index, #endif } +extern "C" void LLVMRustAddAlignmentAttr(LLVMValueRef Fn, + unsigned Index, + uint32_t Bytes) { + Function *A = unwrap(Fn); + AttrBuilder B; + B.addAlignmentAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + A->addAttributes(Index, B); +#else + A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B)); +#endif +} + extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index, uint64_t Bytes) { Function *A = unwrap(Fn); @@ -218,6 +263,19 @@ extern "C" void LLVMRustAddDereferenceableAttr(LLVMValueRef Fn, unsigned Index, #endif } +extern "C" void LLVMRustAddDereferenceableOrNullAttr(LLVMValueRef Fn, + unsigned Index, + uint64_t Bytes) { + Function *A = unwrap(Fn); + AttrBuilder B; + B.addDereferenceableOrNullAttr(Bytes); +#if LLVM_VERSION_GE(5, 0) + A->addAttributes(Index, B); +#else + A->addAttributes(Index, AttributeSet::get(A->getContext(), Index, B)); +#endif +} + extern "C" void LLVMRustAddFunctionAttrStringValue(LLVMValueRef Fn, unsigned Index, const char *Name, diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index 05682a8efae..6cb1972afa5 100644 --- 
a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -15,7 +15,7 @@ #![feature(custom_attribute)] pub struct S { - _field: [i64; 4], + _field: [i32; 8], } pub struct UnsafeInner { @@ -66,7 +66,7 @@ pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) { pub fn mutable_borrow(_: &mut i32) { } -// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0) +// CHECK: @indirect_struct(%S* noalias nocapture align 4 dereferenceable(32) %arg0) #[no_mangle] pub fn indirect_struct(_: S) { } @@ -77,17 +77,17 @@ pub fn indirect_struct(_: S) { pub fn borrowed_struct(_: &S) { } -// CHECK: noalias dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x) +// CHECK: noalias align 4 dereferenceable(4) i32* @_box(i32* noalias dereferenceable(4) %x) #[no_mangle] pub fn _box(x: Box) -> Box { x } -// CHECK: @struct_return(%S* noalias nocapture sret dereferenceable(32)) +// CHECK: @struct_return(%S* noalias nocapture sret align 4 dereferenceable(32)) #[no_mangle] pub fn struct_return() -> S { S { - _field: [0, 0, 0, 0] + _field: [0, 0, 0, 0, 0, 0, 0, 0] } } diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs index 99e6e38a3bf..87cf042f27e 100644 --- a/src/test/codegen/packed.rs +++ b/src/test/codegen/packed.rs @@ -39,7 +39,7 @@ pub struct BigPacked { #[no_mangle] pub fn call_pkd(f: fn() -> Array) -> BigPacked { // CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array -// CHECK: call void %{{.*}}(%Array* noalias nocapture sret dereferenceable(32) [[ALLOCA]]) +// CHECK: call void %{{.*}}(%Array* noalias nocapture sret align 4 dereferenceable(32) [[ALLOCA]]) // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 32, i32 1, i1 false) // check that calls whose destination is a field of a packed struct // go through an alloca rather than calling the function with an -- cgit 1.4.1-3-g733a5 From cdeb4b0d258c19f57ee6fb089126656e18324367 Mon Sep 17 00:00:00 2001 From: Eduard-Mihai Burtescu Date: Fri, 6 Oct 2017 10:25:35 +0300 Subject: rustc: encode scalar pairs in layout ABI. --- src/librustc/ty/layout.rs | 138 +++++++++++++++++++++++---------- src/librustc_trans/abi.rs | 9 ++- src/librustc_trans/cabi_x86_64.rs | 1 + src/librustc_trans/cabi_x86_win64.rs | 1 + src/librustc_trans/common.rs | 9 +-- src/librustc_trans/context.rs | 7 +- src/librustc_trans/mir/analyze.rs | 4 +- src/librustc_trans/mir/constant.rs | 17 +++- src/librustc_trans/mir/lvalue.rs | 33 +++++--- src/librustc_trans/mir/operand.rs | 32 ++++---- src/librustc_trans/type_of.rs | 135 ++++++++++++++++++++++---------- src/test/codegen/adjustments.rs | 6 +- src/test/codegen/function-arguments.rs | 2 +- src/test/codegen/packed.rs | 5 +- src/test/codegen/refs.rs | 4 +- 15 files changed, 266 insertions(+), 137 deletions(-) (limited to 'src/test/codegen/function-arguments.rs') diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs index 0edd8f44f0c..21ba7995332 100644 --- a/src/librustc/ty/layout.rs +++ b/src/librustc/ty/layout.rs @@ -757,6 +757,7 @@ impl FieldPlacement { pub enum Abi { Uninhabited, Scalar(Scalar), + ScalarPair(Scalar, Scalar), Vector, Aggregate { /// If true, the size is exact, otherwise it's only a lower bound. @@ -769,7 +770,10 @@ impl Abi { /// Returns true if the layout corresponds to an unsized type. pub fn is_unsized(&self) -> bool { match *self { - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false, + Abi::Uninhabited | + Abi::Scalar(_) | + Abi::ScalarPair(..) | + Abi::Vector => false, Abi::Aggregate { sized, .. 
} => !sized } } @@ -777,7 +781,10 @@ impl Abi { /// Returns true if the fields of the layout are packed. pub fn is_packed(&self) -> bool { match *self { - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector => false, + Abi::Uninhabited | + Abi::Scalar(_) | + Abi::ScalarPair(..) | + Abi::Vector => false, Abi::Aggregate { packed, .. } => packed } } @@ -905,13 +912,32 @@ impl<'a, 'tcx> CachedLayout { -> Result<&'tcx Self, LayoutError<'tcx>> { let cx = (tcx, param_env); let dl = cx.data_layout(); - let scalar = |value: Primitive| { + let scalar_unit = |value: Primitive| { let bits = value.size(dl).bits(); assert!(bits <= 128); - tcx.intern_layout(CachedLayout::scalar(cx, Scalar { + Scalar { value, valid_range: 0..=(!0 >> (128 - bits)) - })) + } + }; + let scalar = |value: Primitive| { + tcx.intern_layout(CachedLayout::scalar(cx, scalar_unit(value))) + }; + let scalar_pair = |a: Scalar, b: Scalar| { + let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align); + let b_offset = a.value.size(dl).abi_align(b.value.align(dl)); + let size = (b_offset + b.value.size(dl)).abi_align(align); + CachedLayout { + variants: Variants::Single { index: 0 }, + fields: FieldPlacement::Arbitrary { + offsets: vec![Size::from_bytes(0), b_offset], + memory_index: vec![0, 1] + }, + abi: Abi::ScalarPair(a, b), + align, + primitive_align: align, + size + } }; #[derive(Copy, Clone, Debug)] @@ -1049,19 +1075,54 @@ impl<'a, 'tcx> CachedLayout { memory_index = inverse_memory_index; } + let size = min_size.abi_align(align); + let mut abi = Abi::Aggregate { + sized, + packed + }; + + // Look for a scalar pair, as an ABI optimization. + // FIXME(eddyb) ignore extra ZST fields and field ordering. + if sized && !packed && fields.len() == 2 { + match (&fields[0].abi, &fields[1].abi) { + (&Abi::Scalar(ref a), &Abi::Scalar(ref b)) => { + let pair = scalar_pair(a.clone(), b.clone()); + let pair_offsets = match pair.fields { + FieldPlacement::Arbitrary { + ref offsets, + ref memory_index + } => { + assert_eq!(memory_index, &[0, 1]); + offsets + } + _ => bug!() + }; + if offsets[0] == pair_offsets[0] && + offsets[1] == pair_offsets[1] && + memory_index[0] == 0 && + memory_index[1] == 1 && + align == pair.align && + primitive_align == pair.primitive_align && + size == pair.size { + // We can use `ScalarPair` only when it matches our + // already computed layout (including `#[repr(C)]`). + abi = pair.abi; + } + } + _ => {} + } + } + Ok(CachedLayout { variants: Variants::Single { index: 0 }, fields: FieldPlacement::Arbitrary { offsets, memory_index }, - abi: Abi::Aggregate { - sized, - packed - }, + abi, align, primitive_align, - size: min_size.abi_align(align) + size }) }; let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| { @@ -1070,45 +1131,34 @@ impl<'a, 'tcx> CachedLayout { assert!(!ty.has_infer_types()); let ptr_layout = |pointee: Ty<'tcx>| { + let mut data_ptr = scalar_unit(Pointer); + if !ty.is_unsafe_ptr() { + data_ptr.valid_range.start = 1; + } + let pointee = tcx.normalize_associated_type_in_env(&pointee, param_env); if pointee.is_sized(tcx, param_env, DUMMY_SP) { - let non_zero = !ty.is_unsafe_ptr(); - let bits = Pointer.size(dl).bits(); - return Ok(tcx.intern_layout(CachedLayout::scalar(cx, Scalar { - value: Pointer, - valid_range: (non_zero as u128)..=(!0 >> (128 - bits)) - }))); + return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); } let unsized_part = tcx.struct_tail(pointee); let metadata = match unsized_part.sty { - ty::TyForeign(..) 
=> return Ok(scalar(Pointer)), + ty::TyForeign(..) => { + return Ok(tcx.intern_layout(CachedLayout::scalar(cx, data_ptr))); + } ty::TySlice(_) | ty::TyStr => { - Int(dl.ptr_sized_integer(), false) + scalar_unit(Int(dl.ptr_sized_integer(), false)) + } + ty::TyDynamic(..) => { + let mut vtable = scalar_unit(Pointer); + vtable.valid_range.start = 1; + vtable } - ty::TyDynamic(..) => Pointer, _ => return Err(LayoutError::Unknown(unsized_part)) }; // Effectively a (ptr, meta) tuple. - let align = Pointer.align(dl).max(metadata.align(dl)); - let meta_offset = Pointer.size(dl); - assert_eq!(meta_offset, meta_offset.abi_align(metadata.align(dl))); - let fields = FieldPlacement::Arbitrary { - offsets: vec![Size::from_bytes(0), meta_offset], - memory_index: vec![0, 1] - }; - Ok(tcx.intern_layout(CachedLayout { - variants: Variants::Single { index: 0 }, - fields, - abi: Abi::Aggregate { - sized: true, - packed: false - }, - align, - primitive_align: align, - size: (meta_offset + metadata.size(dl)).abi_align(align) - })) + Ok(tcx.intern_layout(scalar_pair(data_ptr, metadata))) }; Ok(match ty.sty { @@ -1134,11 +1184,9 @@ impl<'a, 'tcx> CachedLayout { ty::TyFloat(FloatTy::F32) => scalar(F32), ty::TyFloat(FloatTy::F64) => scalar(F64), ty::TyFnPtr(_) => { - let bits = Pointer.size(dl).bits(); - tcx.intern_layout(CachedLayout::scalar(cx, Scalar { - value: Pointer, - valid_range: 1..=(!0 >> (128 - bits)) - })) + let mut ptr = scalar_unit(Pointer); + ptr.valid_range.start = 1; + tcx.intern_layout(CachedLayout::scalar(cx, ptr)) } // The never type. @@ -2194,7 +2242,7 @@ impl<'a, 'tcx> TyLayout<'tcx> { pub fn is_zst(&self) -> bool { match self.abi { Abi::Uninhabited => true, - Abi::Scalar(_) => false, + Abi::Scalar(_) | Abi::ScalarPair(..) => false, Abi::Vector => self.size.bytes() == 0, Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0 } @@ -2347,6 +2395,10 @@ impl<'gcx> HashStable> for Abi { Scalar(ref value) => { value.hash_stable(hcx, hasher); } + ScalarPair(ref a, ref b) => { + a.hash_stable(hcx, hasher); + b.hash_stable(hcx, hasher); + } Vector => {} Aggregate { packed, sized } => { packed.hash_stable(hcx, hasher); diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs index 54e648c6d4a..d69103bbb52 100644 --- a/src/librustc_trans/abi.rs +++ b/src/librustc_trans/abi.rs @@ -311,6 +311,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { layout::Abi::Uninhabited | layout::Abi::Scalar(_) | layout::Abi::Vector => false, + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. } => true } } @@ -340,6 +341,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> { }) } + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. } => { let mut total = Size::from_bytes(0); let mut result = None; @@ -745,10 +747,13 @@ impl<'a, 'tcx> FnType<'tcx> { arg.attrs.set(ArgAttribute::NonNull); } } - _ => {} + _ => { + // Nothing to do for non-pointer types. + return; + } } - if let Some(pointee) = arg.layout.pointee_info(ccx) { + if let Some(pointee) = arg.layout.pointee_info_at(ccx, Size::from_bytes(0)) { if let Some(kind) = pointee.safe { arg.attrs.pointee_size = pointee.size; arg.attrs.pointee_align = Some(pointee.align); diff --git a/src/librustc_trans/cabi_x86_64.rs b/src/librustc_trans/cabi_x86_64.rs index 62540fac8b5..eeb69276500 100644 --- a/src/librustc_trans/cabi_x86_64.rs +++ b/src/librustc_trans/cabi_x86_64.rs @@ -88,6 +88,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>) } } + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. 
} => { match layout.variants { layout::Variants::Single { .. } => { diff --git a/src/librustc_trans/cabi_x86_win64.rs b/src/librustc_trans/cabi_x86_win64.rs index e93eeb83619..473c00120a7 100644 --- a/src/librustc_trans/cabi_x86_win64.rs +++ b/src/librustc_trans/cabi_x86_win64.rs @@ -18,6 +18,7 @@ pub fn compute_abi_info(fty: &mut FnType) { let fixup = |a: &mut ArgType| { match a.layout.abi { layout::Abi::Uninhabited => {} + layout::Abi::ScalarPair(..) | layout::Abi::Aggregate { .. } => { match a.layout.size.bits() { 8 => a.cast_to(Reg::i8()), diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs index 03ae58fd941..8a2c1ed2dc2 100644 --- a/src/librustc_trans/common.rs +++ b/src/librustc_trans/common.rs @@ -232,16 +232,9 @@ pub fn C_str_slice(cx: &CrateContext, s: InternedString) -> ValueRef { } pub fn C_fat_ptr(cx: &CrateContext, ptr: ValueRef, meta: ValueRef) -> ValueRef { - let empty = C_array(Type::i8(cx), &[]); assert_eq!(abi::FAT_PTR_ADDR, 0); assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(cx, &[ - empty, - ptr, - empty, - meta, - empty - ], false) + C_struct(cx, &[ptr, meta], false) } pub fn C_struct(cx: &CrateContext, elts: &[ValueRef], packed: bool) -> ValueRef { diff --git a/src/librustc_trans/context.rs b/src/librustc_trans/context.rs index d768b14a82e..b2bb605d01b 100644 --- a/src/librustc_trans/context.rs +++ b/src/librustc_trans/context.rs @@ -31,7 +31,7 @@ use rustc::middle::trans::Stats; use rustc_data_structures::stable_hasher::StableHashingContextProvider; use rustc::session::config::{self, NoDebugInfo}; use rustc::session::Session; -use rustc::ty::layout::{LayoutError, LayoutOf, TyLayout}; +use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_trans_utils; @@ -103,7 +103,7 @@ pub struct LocalCrateContext<'a, 'tcx: 'a> { lltypes: RefCell, Option), Type>>, scalar_lltypes: RefCell, Type>>, - pointee_infos: RefCell, Option>>, + pointee_infos: RefCell, Size), Option>>, isize_ty: Type, dbg_cx: Option>, @@ -516,7 +516,8 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> { &self.local().scalar_lltypes } - pub fn pointee_infos<'a>(&'a self) -> &'a RefCell, Option>> { + pub fn pointee_infos<'a>(&'a self) + -> &'a RefCell, Size), Option>> { &self.local().pointee_infos } diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs index bf822249a64..3129ed028d4 100644 --- a/src/librustc_trans/mir/analyze.rs +++ b/src/librustc_trans/mir/analyze.rs @@ -35,7 +35,7 @@ pub fn lvalue_locals<'a, 'tcx>(mircx: &MirContext<'a, 'tcx>) -> BitVector { if layout.is_llvm_immediate() { // These sorts of types are immediates that we can store // in an ValueRef without an alloca. - } else if layout.is_llvm_scalar_pair(mircx.ccx) { + } else if layout.is_llvm_scalar_pair() { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. 
Note that @@ -146,7 +146,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> { let ty = self.cx.monomorphize(&ty.to_ty(self.cx.ccx.tcx())); let layout = self.cx.ccx.layout_of(ty); - if layout.is_llvm_scalar_pair(self.cx.ccx) { + if layout.is_llvm_scalar_pair() { return; } } diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs index 3196300a706..318e36dc71a 100644 --- a/src/librustc_trans/mir/constant.rs +++ b/src/librustc_trans/mir/constant.rs @@ -117,7 +117,12 @@ impl<'a, 'tcx> Const<'tcx> { } fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef { - const_get_elt(self.llval, ccx.layout_of(self.ty).llvm_field_index(i)) + let layout = ccx.layout_of(self.ty); + if let layout::Abi::ScalarPair(..) = layout.abi { + const_get_elt(self.llval, i as u64) + } else { + const_get_elt(self.llval, layout.llvm_field_index(i)) + } } fn get_pair(&self, ccx: &CrateContext<'a, 'tcx>) -> (ValueRef, ValueRef) { @@ -143,7 +148,7 @@ impl<'a, 'tcx> Const<'tcx> { let llty = layout.immediate_llvm_type(ccx); let llvalty = val_ty(self.llval); - let val = if llty == llvalty && layout.is_llvm_scalar_pair(ccx) { + let val = if llty == llvalty && layout.is_llvm_scalar_pair() { let (a, b) = self.get_pair(ccx); OperandValue::Pair(a, b) } else if llty == llvalty && layout.is_llvm_immediate() { @@ -1174,6 +1179,14 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, -> Const<'tcx> { assert_eq!(vals.len(), layout.fields.count()); + if let layout::Abi::ScalarPair(..) = layout.abi { + assert_eq!(vals.len(), 2); + return Const::new(C_struct(ccx, &[ + vals[0].llval, + vals[1].llval, + ], false), layout.ty); + } + // offset of current value let mut offset = Size::from_bytes(0); let mut cfields = Vec::new(); diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 8340d865eb1..ff0b4482678 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -175,10 +175,13 @@ impl<'a, 'tcx> LvalueRef<'tcx> { load }; OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout)) - } else if self.layout.is_llvm_scalar_pair(bcx.ccx) { - OperandValue::Pair( - self.project_field(bcx, 0).load(bcx).immediate(), - self.project_field(bcx, 1).load(bcx).immediate()) + } else if self.layout.is_llvm_scalar_pair() { + let load = |i| { + let x = self.project_field(bcx, i).load(bcx).immediate(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + bcx.bitcast(x, self.layout.scalar_pair_element_llvm_type(bcx.ccx, i)) + }; + OperandValue::Pair(load(0), load(1)) } else { OperandValue::Ref(self.llval, self.alignment) }; @@ -190,17 +193,23 @@ impl<'a, 'tcx> LvalueRef<'tcx> { pub fn project_field(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> LvalueRef<'tcx> { let ccx = bcx.ccx; let field = self.layout.field(ccx, ix); - let offset = self.layout.fields.offset(ix).bytes(); + let offset = self.layout.fields.offset(ix); let alignment = self.alignment | Alignment::from(self.layout); let simple = || { + // Unions and newtypes only use an offset of 0. + let llval = if offset.bytes() == 0 { + self.llval + } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { + // Offsets have to match either first or second field. + assert_eq!(offset, a.value.size(ccx).abi_align(b.value.align(ccx))); + bcx.struct_gep(self.llval, 1) + } else { + bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + }; LvalueRef { - // Unions and newtypes only use an offset of 0. 
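        // (The replacement above encodes the new rule: a field at
        // offset 0 reuses the base pointer via pointercast; the
        // second half of a ScalarPair sits at `a`'s size rounded up
        // to `b`'s alignment and is reached with `struct_gep(.., 1)`;
        // any other aggregate field still goes through
        // `llvm_field_index`.)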
-                llval: if offset == 0 {
-                    bcx.pointercast(self.llval, field.llvm_type(ccx).ptr_to())
-                } else {
-                    bcx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
-                },
+                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                llval: bcx.pointercast(llval, field.llvm_type(ccx).ptr_to()),
                 llextra: if ccx.shared().type_has_metadata(field.ty) {
                     self.llextra
                 } else {
@@ -249,7 +258,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
 
         let meta = self.llextra;
 
-        let unaligned_offset = C_usize(ccx, offset);
+        let unaligned_offset = C_usize(ccx, offset.bytes());
 
         // Get the alignment of the field
         let (_, align) = glue::size_and_align_of_dst(bcx, field.ty, meta);
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
index 3e7aa9d0db5..b9d4148acf6 100644
--- a/src/librustc_trans/mir/operand.rs
+++ b/src/librustc_trans/mir/operand.rs
@@ -123,11 +123,8 @@ impl<'a, 'tcx> OperandRef<'tcx> {
                    self, llty);
             // Reconstruct the immediate aggregate.
             let mut llpair = C_undef(llty);
-            let elems = [a, b];
-            for i in 0..2 {
-                let elem = base::from_immediate(bcx, elems[i]);
-                llpair = bcx.insert_value(llpair, elem, self.layout.llvm_field_index(i));
-            }
+            llpair = bcx.insert_value(llpair, a, 0);
+            llpair = bcx.insert_value(llpair, b, 1);
             llpair
         } else {
             self.immediate()
@@ -139,18 +136,13 @@ impl<'a, 'tcx> OperandRef<'tcx> {
                                           llval: ValueRef,
                                           layout: TyLayout<'tcx>)
                                           -> OperandRef<'tcx> {
-        let val = if layout.is_llvm_scalar_pair(bcx.ccx) {
+        let val = if layout.is_llvm_scalar_pair() {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
                    llval, layout);
 
             // Deconstruct the immediate aggregate.
-            let a = bcx.extract_value(llval, layout.llvm_field_index(0));
-            let a = base::to_immediate(bcx, a, layout.field(bcx.ccx, 0));
-
-            let b = bcx.extract_value(llval, layout.llvm_field_index(1));
-            let b = base::to_immediate(bcx, b, layout.field(bcx.ccx, 1));
-
-            OperandValue::Pair(a, b)
+            OperandValue::Pair(bcx.extract_value(llval, 0),
+                               bcx.extract_value(llval, 1))
         } else {
             OperandValue::Immediate(llval)
         };
@@ -175,8 +167,11 @@ impl<'a, 'tcx> OperandValue {
             }
             OperandValue::Pair(a, b) => {
                 for (i, &x) in [a, b].iter().enumerate() {
-                    OperandValue::Immediate(x)
-                        .store(bcx, dest.project_field(bcx, i));
+                    let field = dest.project_field(bcx, i);
+                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                    let x = bcx.bitcast(x, field.layout.immediate_llvm_type(bcx.ccx));
+                    bcx.store(base::from_immediate(bcx, x),
+                              field.llval, field.alignment.non_abi());
                 }
             }
         }
@@ -214,10 +209,15 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     match (o.val, &proj.elem) {
                         (OperandValue::Pair(a, b),
                          &mir::ProjectionElem::Field(ref f, ty)) => {
+                            let layout = bcx.ccx.layout_of(self.monomorphize(&ty));
                             let llval = [a, b][f.index()];
+                            // HACK(eddyb) have to bitcast pointers
+                            // until LLVM removes pointee types.
+                            let llval = bcx.bitcast(llval,
+                                layout.immediate_llvm_type(bcx.ccx));
                             return OperandRef {
                                 val: OperandValue::Immediate(llval),
-                                layout: bcx.ccx.layout_of(self.monomorphize(&ty))
+                                layout
                             };
                         }
                         _ => {}
diff --git a/src/librustc_trans/type_of.rs b/src/librustc_trans/type_of.rs
index 6da6f1ebaf0..d62e2ac1552 100644
--- a/src/librustc_trans/type_of.rs
+++ b/src/librustc_trans/type_of.rs
@@ -29,6 +29,12 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
             return Type::vector(&layout.field(ccx, 0).llvm_type(ccx),
                                 layout.fields.count() as u64);
         }
+        layout::Abi::ScalarPair(..) => {
+            return Type::struct_(ccx, &[
+                layout.scalar_pair_element_llvm_type(ccx, 0),
+                layout.scalar_pair_element_llvm_type(ccx, 1),
+            ], false);
+        }
         layout::Abi::Uninhabited |
         layout::Abi::Aggregate { .. } => {}
     }
@@ -174,12 +180,15 @@ pub struct PointeeInfo {
 
 pub trait LayoutLlvmExt<'tcx> {
     fn is_llvm_immediate(&self) -> bool;
-    fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool;
+    fn is_llvm_scalar_pair<'a>(&self) -> bool;
     fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
     fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
+    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+                                         index: usize) -> Type;
     fn over_align(&self) -> Option<Align>;
     fn llvm_field_index(&self, index: usize) -> u64;
-    fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo>;
+    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
+                           -> Option<PointeeInfo>;
 }
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
@@ -188,26 +197,18 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             layout::Abi::Uninhabited |
             layout::Abi::Scalar(_) |
             layout::Abi::Vector => true,
-
+            layout::Abi::ScalarPair(..) => false,
             layout::Abi::Aggregate { .. } => self.is_zst()
         }
     }
 
-    fn is_llvm_scalar_pair<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> bool {
-        match self.fields {
-            layout::FieldPlacement::Arbitrary { .. } => {
-                // There must be only 2 fields.
-                if self.fields.count() != 2 {
-                    return false;
-                }
-
-                // The two fields must be both scalars.
-                match (&self.field(ccx, 0).abi, &self.field(ccx, 1).abi) {
-                    (&layout::Abi::Scalar(_), &layout::Abi::Scalar(_)) => true,
-                    _ => false
-                }
-            }
-            _ => false
+    fn is_llvm_scalar_pair<'a>(&self) -> bool {
+        match self.abi {
+            layout::Abi::ScalarPair(..) => true,
+            layout::Abi::Uninhabited |
+            layout::Abi::Scalar(_) |
+            layout::Abi::Vector |
+            layout::Abi::Aggregate { .. } => false
         }
     }
 
@@ -248,7 +249,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             }
             _ => {
                 // If we know the alignment, pick something better than i8.
-                if let Some(pointee) = self.pointee_info(ccx) {
+                if let Some(pointee) = self.pointee_info_at(ccx, Size::from_bytes(0)) {
                     Type::pointee_for_abi_align(ccx, pointee.align)
                 } else {
                     Type::i8(ccx)
@@ -310,6 +311,59 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
         self.llvm_type(ccx)
     }
 
+    fn scalar_pair_element_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>,
+                                         index: usize) -> Type {
+        // HACK(eddyb) special-case fat pointers until LLVM removes
+        // pointee types, to avoid bitcasting every `OperandRef::deref`.
+        match self.ty.sty {
+            ty::TyRef(..) |
+            ty::TyRawPtr(_) => {
+                return self.field(ccx, index).llvm_type(ccx);
+            }
+            ty::TyAdt(def, _) if def.is_box() => {
+                return self.field(ccx, index).llvm_type(ccx);
+            }
+            _ => {}
+        }
+
+        let (a, b) = match self.abi {
+            layout::Abi::ScalarPair(ref a, ref b) => (a, b),
+            _ => bug!("TyLayout::scalar_pair_element_llty({:?}): not applicable", self)
+        };
+        let scalar = [a, b][index];
+
+        // Make sure to return the same type `immediate_llvm_type` would,
+        // to avoid dealing with two types and the associated conversions.
+        // This means that `(bool, bool)` is represented as `{i1, i1}`,
+        // both in memory and as an immediate, while `bool` is typically
+        // `i8` in memory and only `i1` when immediate. While we need to
+        // load/store `bool` as `i8` to avoid crippling LLVM optimizations,
+        // `i1` in a LLVM aggregate is valid and mostly equivalent to `i8`.
+        if scalar.is_bool() {
+            return Type::i1(ccx);
+        }
+
+        match scalar.value {
+            layout::Int(i, _) => Type::from_integer(ccx, i),
+            layout::F32 => Type::f32(ccx),
+            layout::F64 => Type::f64(ccx),
+            layout::Pointer => {
+                // If we know the alignment, pick something better than i8.
+                let offset = if index == 0 {
+                    Size::from_bytes(0)
+                } else {
+                    a.value.size(ccx).abi_align(b.value.align(ccx))
+                };
+                let pointee = if let Some(pointee) = self.pointee_info_at(ccx, offset) {
+                    Type::pointee_for_abi_align(ccx, pointee.align)
+                } else {
+                    Type::i8(ccx)
+                };
+                pointee.ptr_to()
+            }
+        }
+    }
+
     fn over_align(&self) -> Option<Align> {
         if self.align != self.primitive_align {
             Some(self.align)
@@ -319,8 +373,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
     }
 
     fn llvm_field_index(&self, index: usize) -> u64 {
-        if let layout::Abi::Scalar(_) = self.abi {
-            bug!("TyLayout::llvm_field_index({:?}): not applicable", self);
+        match self.abi {
+            layout::Abi::Scalar(_) |
+            layout::Abi::ScalarPair(..) => {
+                bug!("TyLayout::llvm_field_index({:?}): not applicable", self)
+            }
+            _ => {}
         }
         match self.fields {
             layout::FieldPlacement::Union(_) => {
@@ -337,20 +395,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
         }
     }
 
-    fn pointee_info<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<PointeeInfo> {
-        // We only handle thin pointers here.
-        match self.abi {
-            layout::Abi::Scalar(layout::Scalar { value: layout::Pointer, .. }) => {}
-            _ => return None
-        }
-
-        if let Some(&pointee) = ccx.pointee_infos().borrow().get(&self.ty) {
+    fn pointee_info_at<'a>(&self, ccx: &CrateContext<'a, 'tcx>, offset: Size)
+                           -> Option<PointeeInfo> {
+        if let Some(&pointee) = ccx.pointee_infos().borrow().get(&(self.ty, offset)) {
             return pointee;
         }
 
         let mut result = None;
         match self.ty.sty {
-            ty::TyRawPtr(mt) => {
+            ty::TyRawPtr(mt) if offset.bytes() == 0 => {
                 let (size, align) = ccx.size_and_align_of(mt.ty);
                 result = Some(PointeeInfo {
                     size,
                     align,
                     safe: None,
                 });
             }
 
-            ty::TyRef(_, mt) => {
+            ty::TyRef(_, mt) if offset.bytes() == 0 => {
                 let (size, align) = ccx.size_and_align_of(mt.ty);
 
                 let kind = match mt.mutbl {
@@ -385,7 +438,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
                 });
             }
 
-            ty::TyAdt(def, _) if def.is_box() => {
+            ty::TyAdt(def, _) if def.is_box() && offset.bytes() == 0 => {
                 let (size, align) = ccx.size_and_align_of(self.ty.boxed_ty());
                 result = Some(PointeeInfo {
                     size,
                     align,
@@ -408,7 +461,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
                     // to work as long as we don't start using more
                     // niches than just null (e.g. the first page
                     // of the address space, or unaligned pointers).
-                    if self.fields.offset(0).bytes() == 0 {
+                    if self.fields.offset(0) == offset {
                         Some(self.for_variant(ccx, dataful_variant))
                     } else {
                         None
@@ -425,12 +478,16 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
                 }
 
                 if let Some(variant) = data_variant {
+                    let ptr_end = offset + layout::Pointer.size(ccx);
                     for i in 0..variant.fields.count() {
-                        let field = variant.field(ccx, i);
-                        if field.size == self.size {
-                            // We found the pointer field, use its information.
-                            result = field.pointee_info(ccx);
-                            break;
+                        let field_start = variant.fields.offset(i);
+                        if field_start <= offset {
+                            let field = variant.field(ccx, i);
+                            if ptr_end <= field_start + field.size {
+                                // We found the right field, look inside it.
+                                result = field.pointee_info_at(ccx, offset - field_start);
+                                break;
+                            }
                         }
                     }
                 }
             }
@@ -447,7 +504,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
             }
         }
 
-        ccx.pointee_infos().borrow_mut().insert(self.ty, result);
+        ccx.pointee_infos().borrow_mut().insert((self.ty, offset), result);
         result
     }
 }
diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs
index 525a1f5310c..2b35d454739 100644
--- a/src/test/codegen/adjustments.rs
+++ b/src/test/codegen/adjustments.rs
@@ -24,9 +24,9 @@ pub fn helper(_: usize) {
 pub fn no_op_slice_adjustment(x: &[u8]) -> &[u8] {
 // We used to generate an extra alloca and memcpy for the block's trailing expression value, so
 // check that we copy directly to the return value slot
-// CHECK: %0 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } undef, [0 x i8]* %x.0, 1
-// CHECK: %1 = insertvalue { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %0, [[USIZE]] %x.1, 3
-// CHECK: ret { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } %1
+// CHECK: %0 = insertvalue { [0 x i8]*, [[USIZE]] } undef, [0 x i8]* %x.0, 0
+// CHECK: %1 = insertvalue { [0 x i8]*, [[USIZE]] } %0, [[USIZE]] %x.1, 1
+// CHECK: ret { [0 x i8]*, [[USIZE]] } %1
     { x }
 }
diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs
index 6cb1972afa5..428cbdddb22 100644
--- a/src/test/codegen/function-arguments.rs
+++ b/src/test/codegen/function-arguments.rs
@@ -133,7 +133,7 @@ pub fn trait_borrow(_: &Drop) {
 pub fn trait_box(_: Box<Drop>) {
 }
 
-// CHECK: { [0 x i8], [0 x i16]*, [0 x i8], [[USIZE]], [0 x i8] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
+// CHECK: { [0 x i16]*, [[USIZE]] } @return_slice([0 x i16]* noalias nonnull readonly %x.0, [[USIZE]] %x.1)
 #[no_mangle]
 pub fn return_slice(x: &[u16]) -> &[u16] {
   x
diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs
index 87cf042f27e..64e842b026e 100644
--- a/src/test/codegen/packed.rs
+++ b/src/test/codegen/packed.rs
@@ -54,9 +54,6 @@ pub struct PackedPair(u8, u32);
 // CHECK-LABEL: @pkd_pair
 #[no_mangle]
 pub fn pkd_pair(pair1: &mut PackedPair, pair2: &mut PackedPair) {
-    // CHECK: [[V1:%[a-z0-9]+]] = load i8, i8* %{{.*}}, align 1
-    // CHECK: [[V2:%[a-z0-9]+]] = load i32, i32* %{{.*}}, align 1
-    // CHECK: store i8 [[V1]], i8* {{.*}}, align 1
-    // CHECK: store i32 [[V2]], i32* {{.*}}, align 1
+// CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 5, i32 1, i1 false)
     *pair2 = *pair1;
 }
diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs
index 2ab64fffa3b..0c084131ea3 100644
--- a/src/test/codegen/refs.rs
+++ b/src/test/codegen/refs.rs
@@ -24,9 +24,9 @@ pub fn helper(_: usize) {
 pub fn ref_dst(s: &[u8]) {
 // We used to generate an extra alloca and memcpy to ref the dst, so check that we copy
 // directly to the alloca for "x"
-// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x to [0 x i8]**
+// CHECK: [[X0:%[0-9]+]] = bitcast { [0 x i8]*, [[USIZE]] }* %x to [0 x i8]**
 // CHECK: store [0 x i8]* %s.0, [0 x i8]** [[X0]]
-// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] }* %x, i32 0, i32 3
+// CHECK: [[X1:%[0-9]+]] = getelementptr {{.*}} { [0 x i8]*, [[USIZE]] }* %x, i32 0, i32 1
 // CHECK: store [[USIZE]] %s.1, [[USIZE]]* [[X1]]
     let x = &*s;
 
-- cgit 1.4.1-3-g733a5
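
The net effect of the patch above: a value whose layout ABI is Abi::ScalarPair is passed and returned as two immediates, which is why the slice tests go from the padded { [0 x i8], [0 x i8]*, [0 x i8], [[USIZE]], [0 x i8] } struct to the plain pair { [0 x i8]*, [[USIZE]] }. A minimal standalone sketch of the new shape test follows; Abi and Scalar here are simplified stand-ins for the rustc::ty::layout types, not the compiler's actual API:

    // Simplified mirror of the new is_llvm_scalar_pair(): the decision reads
    // the precomputed layout ABI instead of re-inspecting the two fields.
    enum Abi {
        Uninhabited,
        Scalar(Scalar),
        ScalarPair(Scalar, Scalar),
        Vector,
        Aggregate { sized: bool },
    }

    struct Scalar; // stand-in for layout::Scalar (primitive value + valid range)

    fn is_llvm_scalar_pair(abi: &Abi) -> bool {
        match *abi {
            Abi::ScalarPair(..) => true,
            Abi::Uninhabited |
            Abi::Scalar(_) |
            Abi::Vector |
            Abi::Aggregate { .. } => false,
        }
    }

    fn main() {
        // A slice reference like &[u16] has Abi::ScalarPair(data_ptr, len) and
        // is lowered to the two immediates seen in the CHECK lines above.
        assert!(is_llvm_scalar_pair(&Abi::ScalarPair(Scalar, Scalar)));
        assert!(!is_llvm_scalar_pair(&Abi::Aggregate { sized: true }));
    }
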
From 37a7521ef93b2e2d7a4cd04df38929d841b8ffcc Mon Sep 17 00:00:00 2001
From: Eduard-Mihai Burtescu
Date: Mon, 9 Oct 2017 02:31:06 +0300
Subject: rustc: unpack scalar newtype layout ABIs.

---
 src/librustc/ty/layout.rs              | 50 ++++++++++++++++++++++---------
 src/librustc_trans/mir/analyze.rs      |  4 +--
 src/librustc_trans/mir/block.rs        |  8 +++--
 src/librustc_trans/mir/constant.rs     | 54 +++++++++++++++++++++++++++-------
 src/librustc_trans/mir/operand.rs      | 53 +++++++++++++++++++++++----------
 src/test/codegen/function-arguments.rs |  6 ++--
 src/test/codegen/issue-32031.rs        |  4 +--
 7 files changed, 129 insertions(+), 50 deletions(-)

(limited to 'src/test/codegen/function-arguments.rs')

diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index 70c41e7402d..3bf711d3e23 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -1078,6 +1078,30 @@ impl<'a, 'tcx> CachedLayout {
             packed
         };
 
+        // Unpack newtype ABIs.
+        if sized && optimize && size.bytes() > 0 {
+            // All but one field must be ZSTs, and so they all start at 0.
+            if offsets.iter().all(|o| o.bytes() == 0) {
+                let mut non_zst_fields = fields.iter().filter(|f| !f.is_zst());
+
+                // We have exactly one non-ZST field.
+                match (non_zst_fields.next(), non_zst_fields.next()) {
+                    (Some(field), None) => {
+                        // Field size match and it has a scalar ABI.
+                        if size == field.size {
+                            match field.abi {
+                                Abi::Scalar(_) => {
+                                    abi = field.abi.clone();
+                                }
+                                _ => {}
+                            }
+                        }
+                    }
+                    _ => {}
+                }
+            }
+        }
+
         // Look for a scalar pair, as an ABI optimization.
         // FIXME(eddyb) ignore extra ZST fields and field ordering.
         if sized && !packed && fields.len() == 2 {
@@ -1424,6 +1448,18 @@ impl<'a, 'tcx> CachedLayout {
                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                     st.variants = Variants::Single { index: v };
+                    // Exclude 0 from the range of a newtype ABI NonZero.
+                    if Some(def.did) == cx.tcx().lang_items().non_zero() {
+                        match st.abi {
+                            Abi::Scalar(ref mut scalar) |
+                            Abi::ScalarPair(ref mut scalar, _) => {
+                                if scalar.valid_range.start == 0 {
+                                    scalar.valid_range.start = 1;
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
                     return Ok(tcx.intern_layout(st));
                 }
@@ -2284,20 +2320,6 @@ impl<'a, 'tcx> TyLayout<'tcx> {
             };
         }
 
-        // Is this the NonZero lang item wrapping a pointer or integer type?
-        if let ty::TyAdt(def, _) = self.ty.sty {
-            if Some(def.did) == cx.tcx().lang_items().non_zero() {
-                let field = self.field(cx, 0)?;
-                let offset = self.fields.offset(0);
-                if let Abi::Scalar(Scalar { value, ref valid_range }) = field.abi {
-                    return Ok(Some((offset, Scalar {
-                        value,
-                        valid_range: 0..=valid_range.end
-                    }, 0)));
-                }
-            }
-        }
-
         // Perhaps one of the fields is non-zero, let's recurse and find out.
         if let FieldPlacement::Union(_) = self.fields {
             // Only Rust enums have safe-to-inspect fields
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index 3f3c5ac0a62..223379527c9 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -139,7 +139,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
         let ccx = self.cx.ccx;
 
         if let mir::Lvalue::Projection(ref proj) = *lvalue {
-            // Allow uses of projections that are ZSTs or from immediate scalar fields.
+            // Allow uses of projections that are ZSTs or from scalar fields.
             if let LvalueContext::Consume = context {
                 let base_ty = proj.base.ty(self.cx.mir, ccx.tcx());
                 let base_ty = self.cx.monomorphize(&base_ty);
@@ -153,7 +153,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
                 if let mir::ProjectionElem::Field(..) = proj.elem {
                     let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx()));
-                    if layout.is_llvm_scalar_pair() {
+                    if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
                         // Recurse as a `Consume` instead of `Projection`,
                         // potentially stopping at non-operand projections,
                         // which would trigger `mark_as_lvalue` on locals.
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index e739037b07d..6811861499d 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -700,11 +700,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     let elem = if field.is_zst() {
                         C_undef(field.llvm_type(bcx.ccx))
                     } else {
-                        bcx.extract_value(llval, tuple.layout.llvm_field_index(i))
+                        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                        bcx.bitcast(llval, field.immediate_llvm_type(bcx.ccx))
                     };
                     // If the tuple is immediate, the elements are as well
                     let op = OperandRef {
-                        val: Immediate(base::to_immediate(bcx, elem, field)),
+                        val: Immediate(elem),
                         layout: field,
                     };
                     self.trans_argument(bcx, op, llargs, &args[i]);
@@ -712,7 +713,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             }
             Pair(a, b) => {
                 let elems = [a, b];
-                for i in 0..tuple.layout.fields.count() {
+                assert_eq!(tuple.layout.fields.count(), 2);
+                for i in 0..2 {
                     // Pair is always made up of immediates
                     let op = OperandRef {
                         val: Immediate(elems[i]),
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index 318e36dc71a..f223227cd72 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -118,10 +118,27 @@ impl<'a, 'tcx> Const<'tcx> {
 
     fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef {
         let layout = ccx.layout_of(self.ty);
-        if let layout::Abi::ScalarPair(..) = layout.abi {
-            const_get_elt(self.llval, i as u64)
-        } else {
-            const_get_elt(self.llval, layout.llvm_field_index(i))
+        let field = layout.field(ccx, i);
+        if field.is_zst() {
+            return C_undef(field.immediate_llvm_type(ccx));
+        }
+        match layout.abi {
+            layout::Abi::Scalar(_) => self.llval,
+            layout::Abi::ScalarPair(ref a, ref b) => {
+                let offset = layout.fields.offset(i);
+                if offset.bytes() == 0 {
+                    assert_eq!(field.size, a.value.size(ccx));
+                    const_get_elt(self.llval, 0)
+                } else {
+                    assert_eq!(offset, a.value.size(ccx)
+                        .abi_align(b.value.align(ccx)));
+                    assert_eq!(field.size, b.value.size(ccx));
+                    const_get_elt(self.llval, 1)
+                }
+            }
+            _ => {
+                const_get_elt(self.llval, layout.llvm_field_index(i))
+            }
         }
     }
@@ -159,7 +176,8 @@ impl<'a, 'tcx> Const<'tcx> {
             // a constant LLVM global and cast its address if necessary.
             let align = ccx.align_of(self.ty);
             let ptr = consts::addr_of(ccx, self.llval, align, "const");
-            OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned)
+            OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(ccx).ptr_to()),
+                              Alignment::AbiAligned)
         };
 
         OperandRef {
@@ -1179,12 +1197,26 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                 -> Const<'tcx> {
     assert_eq!(vals.len(), layout.fields.count());
 
-    if let layout::Abi::ScalarPair(..) = layout.abi {
-        assert_eq!(vals.len(), 2);
-        return Const::new(C_struct(ccx, &[
-            vals[0].llval,
-            vals[1].llval,
-        ], false), layout.ty);
+    match layout.abi {
+        layout::Abi::Scalar(_) |
+        layout::Abi::ScalarPair(..) if discr.is_none() => {
+            let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| {
+                (f, layout.fields.offset(i))
+            }).filter(|&(f, _)| !ccx.layout_of(f.ty).is_zst());
+            match (non_zst_fields.next(), non_zst_fields.next()) {
+                (Some((x, offset)), None) if offset.bytes() == 0 => {
+                    return Const::new(x.llval, layout.ty);
+                }
+                (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => {
+                    return Const::new(C_struct(ccx, &[a.llval, b.llval], false), layout.ty);
+                }
+                (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => {
+                    return Const::new(C_struct(ccx, &[b.llval, a.llval], false), layout.ty);
+                }
+                _ => {}
+            }
+        }
+        _ => {}
     }
 
     // offset of current value
diff --git a/src/librustc_trans/mir/operand.rs b/src/librustc_trans/mir/operand.rs
index 97e7dda31aa..7826d998df3 100644
--- a/src/librustc_trans/mir/operand.rs
+++ b/src/librustc_trans/mir/operand.rs
@@ -10,12 +10,12 @@
 
 use llvm::ValueRef;
 use rustc::ty;
-use rustc::ty::layout::{LayoutOf, TyLayout};
+use rustc::ty::layout::{self, LayoutOf, TyLayout};
 use rustc::mir;
 use rustc_data_structures::indexed_vec::Idx;
 
 use base;
-use common::{CrateContext, C_undef};
+use common::{CrateContext, C_undef, C_usize};
 use builder::Builder;
 use value::Value;
 use type_of::LayoutLlvmExt;
@@ -207,24 +207,47 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             if let mir::ProjectionElem::Field(ref f, _) = proj.elem {
                 if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
                     let layout = o.layout.field(bcx.ccx, f.index());
+                    let offset = o.layout.fields.offset(f.index());
 
                     // Handled in `trans_consume`.
                     assert!(!layout.is_zst());
 
-                    match o.val {
-                        OperandValue::Pair(a, b) => {
-                            let llval = [a, b][f.index()];
-                            // HACK(eddyb) have to bitcast pointers
-                            // until LLVM removes pointee types.
-                            let llval = bcx.bitcast(llval,
-                                layout.immediate_llvm_type(bcx.ccx));
-                            return Some(OperandRef {
-                                val: OperandValue::Immediate(llval),
-                                layout
-                            });
+                    // Offset has to match a scalar component.
+                    let llval = match (o.val, &o.layout.abi) {
+                        (OperandValue::Immediate(llval),
+                         &layout::Abi::Scalar(ref scalar)) => {
+                            assert_eq!(offset.bytes(), 0);
+                            assert_eq!(layout.size, scalar.value.size(bcx.ccx));
+                            llval
                         }
-                        _ => {}
-                    }
+                        (OperandValue::Pair(a_llval, b_llval),
+                         &layout::Abi::ScalarPair(ref a, ref b)) => {
+                            if offset.bytes() == 0 {
+                                assert_eq!(layout.size, a.value.size(bcx.ccx));
+                                a_llval
+                            } else {
+                                assert_eq!(offset, a.value.size(bcx.ccx)
+                                    .abi_align(b.value.align(bcx.ccx)));
+                                assert_eq!(layout.size, b.value.size(bcx.ccx));
+                                b_llval
+                            }
+                        }
+
+                        // `#[repr(simd)]` types are also immediate.
+                        (OperandValue::Immediate(llval),
+                         &layout::Abi::Vector) => {
+                            bcx.extract_element(llval, C_usize(bcx.ccx, f.index() as u64))
+                        }
+
+                        _ => return None
+                    };
+
+                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                    let llval = bcx.bitcast(llval, layout.immediate_llvm_type(bcx.ccx));
+                    return Some(OperandRef {
+                        val: OperandValue::Immediate(llval),
+                        layout
+                    });
                 }
             }
         }
diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs
index 428cbdddb22..f96c104b265 100644
--- a/src/test/codegen/function-arguments.rs
+++ b/src/test/codegen/function-arguments.rs
@@ -46,13 +46,13 @@ pub fn static_borrow(_: &'static i32) {
 pub fn named_borrow<'r>(_: &'r i32) {
 }
 
-// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
+// CHECK: @unsafe_borrow(i16* dereferenceable(2) %arg0)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_borrow(_: &UnsafeInner) {
 }
 
-// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
+// CHECK: @mutable_unsafe_borrow(i16* dereferenceable(2) %arg0)
 // ... unless this is a mutable borrow, those never alias
 // ... except that there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
@@ -110,7 +110,7 @@ pub fn slice(_: &[u8]) {
 pub fn mutable_slice(_: &mut [u8]) {
 }
 
-// CHECK: @unsafe_slice([0 x %UnsafeInner]* nonnull %arg0.0, [[USIZE]] %arg0.1)
+// CHECK: @unsafe_slice([0 x i16]* nonnull %arg0.0, [[USIZE]] %arg0.1)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_slice(_: &[UnsafeInner]) {
diff --git a/src/test/codegen/issue-32031.rs b/src/test/codegen/issue-32031.rs
index 5d3ccbfa4ce..e5ec1738545 100644
--- a/src/test/codegen/issue-32031.rs
+++ b/src/test/codegen/issue-32031.rs
@@ -15,7 +15,7 @@
 #[no_mangle]
 pub struct F32(f32);
 
-// CHECK: define float @add_newtype_f32(float, float)
+// CHECK: define float @add_newtype_f32(float %a, float %b)
 #[inline(never)]
 #[no_mangle]
 pub fn add_newtype_f32(a: F32, b: F32) -> F32 {
@@ -25,7 +25,7 @@ pub fn add_newtype_f32(a: F32, b: F32) -> F32 {
 #[no_mangle]
 pub struct F64(f64);
 
-// CHECK: define double @add_newtype_f64(double, double)
+// CHECK: define double @add_newtype_f64(double %a, double %b)
 #[inline(never)]
 #[no_mangle]
 pub fn add_newtype_f64(a: F64, b: F64) -> F64 {
-- cgit 1.4.1-3-g733a5
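
The effect of unpacking newtype ABIs is visible in the issue-32031.rs expectations above: a tuple struct wrapping a single scalar is now passed exactly like the scalar itself. A standalone illustration in the spirit of that test (ordinary Rust, runnable on its own; not part of the patch):

    // F32 has one non-ZST field whose size equals the struct's size, so its
    // layout ABI is unpacked to the field's scalar ABI; both functions then
    // lower to the same LLVM signature: define float @...(float %a, float %b).
    pub struct F32(pub f32);

    #[no_mangle]
    pub fn add_newtype_f32(a: F32, b: F32) -> F32 {
        F32(a.0 + b.0)
    }

    #[no_mangle]
    pub fn add_f32(a: f32, b: f32) -> f32 {
        a + b
    }

    fn main() {
        assert_eq!(add_newtype_f32(F32(1.0), F32(2.0)).0, add_f32(1.0, 2.0));
    }
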
From 88e4d2c2918428d55e34cd57c11279ea839c8822 Mon Sep 17 00:00:00 2001
From: Eduard-Mihai Burtescu
Date: Sun, 19 Nov 2017 12:13:24 +0200
Subject: rustc_trans: work around i686-pc-windows-msvc byval align LLVM bug.

---
 src/librustc_trans/abi.rs              | 4 +++-
 src/test/codegen/function-arguments.rs | 4 ++--
 src/test/codegen/packed.rs             | 2 +-
 3 files changed, 6 insertions(+), 4 deletions(-)

(limited to 'src/test/codegen/function-arguments.rs')

diff --git a/src/librustc_trans/abi.rs b/src/librustc_trans/abi.rs
index 7ef89597b11..0bf6e84337b 100644
--- a/src/librustc_trans/abi.rs
+++ b/src/librustc_trans/abi.rs
@@ -491,7 +491,9 @@ impl<'a, 'tcx> ArgType<'tcx> {
             .set(ArgAttribute::NoCapture)
             .set(ArgAttribute::NonNull);
         attrs.pointee_size = self.layout.size;
-        attrs.pointee_align = Some(self.layout.align);
+        // FIXME(eddyb) We should be doing this, but at least on
+        // i686-pc-windows-msvc, it results in wrong stack offsets.
+        // attrs.pointee_align = Some(self.layout.align);
         self.mode = PassMode::Indirect(attrs);
     }
 
diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs
index f96c104b265..f8945a6ee8d 100644
--- a/src/test/codegen/function-arguments.rs
+++ b/src/test/codegen/function-arguments.rs
@@ -66,7 +66,7 @@ pub fn mutable_borrow(_: &mut i32) {
 }
 
-// CHECK: @indirect_struct(%S* noalias nocapture align 4 dereferenceable(32) %arg0)
+// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0)
 #[no_mangle]
 pub fn indirect_struct(_: S) {
 }
@@ -83,7 +83,7 @@ pub fn _box(x: Box<i32>) -> Box<i32> {
   x
 }
 
-// CHECK: @struct_return(%S* noalias nocapture sret align 4 dereferenceable(32))
+// CHECK: @struct_return(%S* noalias nocapture sret dereferenceable(32))
 #[no_mangle]
 pub fn struct_return() -> S {
   S {
diff --git a/src/test/codegen/packed.rs b/src/test/codegen/packed.rs
index 64e842b026e..dd530cf03cd 100644
--- a/src/test/codegen/packed.rs
+++ b/src/test/codegen/packed.rs
@@ -39,7 +39,7 @@ pub struct BigPacked {
 #[no_mangle]
 pub fn call_pkd(f: fn() -> Array) -> BigPacked {
 // CHECK: [[ALLOCA:%[_a-z0-9]+]] = alloca %Array
-// CHECK: call void %{{.*}}(%Array* noalias nocapture sret align 4 dereferenceable(32) [[ALLOCA]])
+// CHECK: call void %{{.*}}(%Array* noalias nocapture sret dereferenceable(32) [[ALLOCA]])
 // CHECK: call void @llvm.memcpy.{{.*}}(i8* %{{.*}}, i8* %{{.*}}, i{{[0-9]+}} 32, i32 1, i1 false)
 // check that calls whose destination is a field of a packed struct
 // go through an alloca rather than calling the function with an
-- cgit 1.4.1-3-g733a5
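
The workaround in the last patch only drops the pointee alignment attribute from indirectly passed arguments and sret returns; the pass-by-reference lowering itself is unchanged, as the updated CHECK lines show. A hypothetical example of the kind of signature affected (a 32-byte struct comparable to the S used by the codegen tests; the LLVM signature in the comment is an assumption based on those CHECK lines, not compiler output):

    // S is too large to pass in registers, so it is passed indirectly: the
    // caller allocates a copy and the callee receives a pointer, roughly
    //   @sum_s(%S* noalias nocapture dereferenceable(32) %s)
    // with no explicit align attribute while the FIXME above stands.
    #[derive(Clone, Copy)]
    pub struct S {
        a: i32,
        b: i32,
        rest: [i32; 6],
    }

    #[no_mangle]
    pub fn sum_s(s: S) -> i32 {
        s.a + s.b + s.rest.iter().sum::<i32>()
    }

    fn main() {
        let s = S { a: 1, b: 2, rest: [3; 6] };
        assert_eq!(sum_s(s), 21);
    }
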