|  |  |  |
|---|---|---|
| author | bors <bors@rust-lang.org> | 2021-08-11 01:36:23 +0000 |
| committer | bors <bors@rust-lang.org> | 2021-08-11 01:36:23 +0000 |
| commit | 47b41b7788a6f85c749049062f1e4eed497cd894 | |
| tree | f68a519116440d8e4194981cdbacdaab12f9abf1 /compiler/rustc_codegen_llvm/src/context.rs | |
| parent | e8e1b32a7840c07f30c04b252c379a044a73902d | |
| parent | 02295f464aaf78ece81a80e5b99a034119e74748 | |
Auto merge of #87254 - rusticstuff:rustc_codegen_llvm_dont_emit_zero_sized_padding, r=eddyb
LLVM codegen: Don't emit zero-sized padding for fields

Currently, padding is emitted before the fields of a struct and at the end of the struct regardless of the ABI. Even if no padding is required, zero-sized padding fields are emitted. This is not useful and, more importantly, it makes it impossible to generate the exact vector types that LLVM expects for certain ARM SIMD intrinsics. This change should unblock the implementation of many ARM intrinsics using the `unadjusted` ABI, see https://github.com/rust-lang/stdarch/issues/1143#issuecomment-827404092.

This is a proof of concept only, because field lookup now takes O(number of fields) time compared to O(1) before: the mapping from source field order to LLVM field order is recalculated at every lookup. I would like to find out how big the performance impact actually is before implementing caching or restricting this behavior to the `unadjusted` ABI.

cc `@SparrowLii` `@bjorn3` ([Discussion on internals](https://internals.rust-lang.org/t/feature-request-add-a-way-in-rustc-for-generating-struct-type-llvm-ir-without-paddings/15007))
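To make that trade-off concrete, the sketch below shows one way a source-order-to-LLVM-order field mapping can be recomputed from field offsets and sizes; this is the kind of per-lookup O(number of fields) work the message refers to. The function name, signature, and byte-based inputs are illustrative assumptions, not code from this PR.

```rust
// A minimal, self-contained sketch: given per-field byte offsets and sizes in
// source order, compute each field's index in an LLVM struct type that inserts
// an explicit padding field only where a non-zero gap exists.
fn field_remapping(offsets: &[u64], sizes: &[u64]) -> Vec<u32> {
    let mut remapping = Vec::with_capacity(offsets.len());
    let mut llvm_index = 0u32;
    let mut prev_end = 0u64; // byte offset just past the previous field
    for (&offset, &size) in offsets.iter().zip(sizes) {
        if offset > prev_end {
            // A real gap: one explicit padding field (e.g. `[N x i8]`) precedes this field.
            llvm_index += 1;
        }
        // No padding field is emitted for a zero-sized gap.
        remapping.push(llvm_index);
        llvm_index += 1;
        prev_end = offset + size;
    }
    remapping
}

fn main() {
    // A `#[repr(C)]`-style `{ u8, u32 }` layout: a 3-byte gap before the second field,
    // so the second source field becomes LLVM field 2 (field 1 is the padding).
    assert_eq!(field_remapping(&[0, 4], &[1, 4]), vec![0, 2]);
    // Tightly packed fields need no padding, so source and LLVM indices coincide.
    assert_eq!(field_remapping(&[0, 4], &[4, 4]), vec![0, 1]);
}
```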
Diffstat (limited to 'compiler/rustc_codegen_llvm/src/context.rs')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/context.rs | 18 |

1 file changed, 16 insertions, 2 deletions
```diff
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 685f222e802..5d56c93f835 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -24,6 +24,7 @@ use rustc_span::source_map::{Span, DUMMY_SP};
 use rustc_span::symbol::Symbol;
 use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
 use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
+use smallvec::SmallVec;
 
 use std::cell::{Cell, RefCell};
 use std::ffi::CStr;
@@ -74,8 +75,12 @@ pub struct CodegenCx<'ll, 'tcx> {
     /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
     pub used_statics: RefCell<Vec<&'ll Value>>,
 
-    pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,
+    /// Mapping of non-scalar types to llvm types and field remapping if needed.
+    pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>,
+
+    /// Mapping of scalar types to llvm types.
     pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
+
     pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
 
     pub isize_ty: &'ll Type,
@@ -92,6 +97,15 @@ pub struct CodegenCx<'ll, 'tcx> {
     local_gen_sym_counter: Cell<usize>,
 }
 
+pub struct TypeLowering<'ll> {
+    /// Associated LLVM type
+    pub lltype: &'ll Type,
+
+    /// If padding is used the slice maps fields from source order
+    /// to llvm order.
+    pub field_remapping: Option<SmallVec<[u32; 4]>>,
+}
+
 fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
     match tls_model {
         TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
@@ -304,7 +318,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             const_globals: Default::default(),
             statics_to_rauw: RefCell::new(Vec::new()),
             used_statics: RefCell::new(Vec::new()),
-            lltypes: Default::default(),
+            type_lowering: Default::default(),
             scalar_lltypes: Default::default(),
             pointee_infos: Default::default(),
             isize_ty,
```
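For context on how the new cache entry might be consumed, here is a hedged sketch built around the `TypeLowering` shape from the diff. The stand-in `Type`, the `llvm_field_index` helper, and its signature are assumptions for illustration, not APIs from `rustc_codegen_llvm`.

```rust
use smallvec::SmallVec;

/// Stand-in for the real LLVM type handle, just to keep the sketch self-contained.
struct Type;

/// Mirrors the shape of the new cache entry: the lowered LLVM type plus an
/// optional mapping from source field order to LLVM field order.
struct TypeLowering<'ll> {
    lltype: &'ll Type,
    field_remapping: Option<SmallVec<[u32; 4]>>,
}

/// Hypothetical helper: translate a source-order field index into the LLVM
/// struct field index, falling back to the identity mapping when no padding
/// fields were inserted.
fn llvm_field_index(lowering: &TypeLowering<'_>, source_index: usize) -> u32 {
    match &lowering.field_remapping {
        Some(remapping) => remapping[source_index],
        None => source_index as u32,
    }
}

fn main() {
    let lltype = Type;
    // A lowering whose second source field sits after one inserted padding field.
    let lowering = TypeLowering {
        lltype: &lltype,
        field_remapping: Some(SmallVec::from_slice(&[0, 2])),
    };
    assert_eq!(llvm_field_index(&lowering, 1), 2);
    let _ = lowering.lltype; // silence the unused-field warning in this toy example
}
```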
