Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
-rw-r--r--  compiler/rustc_codegen_llvm/src/back/write.rs            10
-rw-r--r--  compiler/rustc_codegen_llvm/src/builder.rs               11
-rw-r--r--  compiler/rustc_codegen_llvm/src/context.rs               48
-rw-r--r--  compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs   36
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs     2
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs             67
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm/ffi.rs              10
-rw-r--r--  compiler/rustc_codegen_llvm/src/llvm_util.rs              9
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_.rs                  3
-rw-r--r--  compiler/rustc_codegen_llvm/src/type_of.rs               37
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs               100
11 files changed, 210 insertions, 123 deletions
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 08aab849868..031bbd63361 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -519,12 +519,22 @@ pub(crate) unsafe fn llvm_optimize(
     let pgo_sample_use_path = get_pgo_sample_use_path(config);
     let is_lto = opt_stage == llvm::OptStage::ThinLTO || opt_stage == llvm::OptStage::FatLTO;
     let instr_profile_output_path = get_instr_profile_output_path(config);
+    let sanitize_dataflow_abilist: Vec<_> = config
+        .sanitizer_dataflow_abilist
+        .iter()
+        .map(|file| CString::new(file.as_str()).unwrap())
+        .collect();
+    let sanitize_dataflow_abilist_ptrs: Vec<_> =
+        sanitize_dataflow_abilist.iter().map(|file| file.as_ptr()).collect();
     // Sanitizer instrumentation is only inserted during the pre-link optimization stage.
     let sanitizer_options = if !is_lto {
         Some(llvm::SanitizerOptions {
             sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS),
             sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS),
             sanitize_cfi: config.sanitizer.contains(SanitizerSet::CFI),
+            sanitize_dataflow: config.sanitizer.contains(SanitizerSet::DATAFLOW),
+            sanitize_dataflow_abilist: sanitize_dataflow_abilist_ptrs.as_ptr(),
+            sanitize_dataflow_abilist_len: sanitize_dataflow_abilist_ptrs.len(),
             sanitize_kcfi: config.sanitizer.contains(SanitizerSet::KCFI),
             sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY),
             sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY),
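The ABI-list plumbing above hinges on a lifetime detail: the `Vec<CString>` owns the NUL-terminated path buffers, and the pointer vector merely borrows into it, so both must stay in scope until LLVM has consumed the options. A minimal standalone sketch of that discipline (the helper name is hypothetical):

```rust
use std::ffi::CString;
use std::os::raw::c_char;

// Builds a borrowed pointer table over owned CStrings and hands it to a
// callback; both vectors stay alive for the duration of the call.
fn with_cstring_ptrs(files: &[String], f: impl FnOnce(*const *const c_char, usize)) {
    let owned: Vec<CString> =
        files.iter().map(|s| CString::new(s.as_str()).unwrap()).collect();
    let ptrs: Vec<*const c_char> = owned.iter().map(|s| s.as_ptr()).collect();
    // The raw pointers are only valid while `owned` is in scope.
    f(ptrs.as_ptr(), ptrs.len());
}
```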
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 2110e62a9c4..eaedaec635f 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -603,11 +603,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 let llptr = if i == 0 {
                     place.llval
                 } else {
-                    self.inbounds_gep(
-                        self.type_i8(),
-                        place.llval,
-                        &[self.const_usize(b_offset.bytes())],
-                    )
+                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -778,11 +774,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
-        assert_eq!(idx as c_uint as u64, idx);
-        unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
-    }
-
     /* Casts */
     fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, UNNAMED) }
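With opaque pointers, a byte-wise offset over `i8` is the canonical way to address into an aggregate, which is why `struct_gep` (and its `LLVMBuildStructGEP2` binding, removed further down) is no longer needed. A standalone sketch of how `inbounds_ptradd` can be a provided method layered on the plain GEP; the names mirror the codegen traits, but this is an illustration, not the rustc definition:

```rust
// Toy builder trait: `inbounds_ptradd` defaults to a byte-offset GEP.
trait BuilderSketch {
    type Value: Copy;
    type Type: Copy;

    fn type_i8(&self) -> Self::Type;
    fn inbounds_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: &[Self::Value]) -> Self::Value;

    // With opaque pointers, a GEP over i8 is exactly `ptr + byte_offset`.
    fn inbounds_ptradd(&mut self, ptr: Self::Value, offset: Self::Value) -> Self::Value {
        let i8t = self.type_i8();
        self.inbounds_gep(i8t, ptr, &[offset])
    }
}
```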
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 7dfcf1ab50e..16122e5557e 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -685,8 +685,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         let t_i64 = self.type_i64();
         let t_i128 = self.type_i128();
         let t_isize = self.type_isize();
+        let t_f16 = self.type_f16();
         let t_f32 = self.type_f32();
         let t_f64 = self.type_f64();
+        let t_f128 = self.type_f128();
         let t_metadata = self.type_metadata();
         let t_token = self.type_token();
 
@@ -728,69 +730,115 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.debugtrap", fn() -> void);
         ifn!("llvm.frameaddress", fn(t_i32) -> ptr);
 
+        ifn!("llvm.powi.f16", fn(t_f16, t_i32) -> t_f16);
         ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
         ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
+        ifn!("llvm.powi.f128", fn(t_f128, t_i32) -> t_f128);
 
+        ifn!("llvm.pow.f16", fn(t_f16, t_f16) -> t_f16);
         ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
         ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.pow.f128", fn(t_f128, t_f128) -> t_f128);
 
+        ifn!("llvm.sqrt.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.sqrt.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.sin.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.sin.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.cos.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.cos.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.exp.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.exp.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.exp2.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.exp2.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.log.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.log10.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log10.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.log2.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.log2.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.fma.f16", fn(t_f16, t_f16, t_f16) -> t_f16);
         ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
         ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
+        ifn!("llvm.fma.f128", fn(t_f128, t_f128, t_f128) -> t_f128);
 
+        ifn!("llvm.fabs.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.fabs.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.minnum.f16", fn(t_f16, t_f16) -> t_f16);
         ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
         ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.minnum.f128", fn(t_f128, t_f128) -> t_f128);
+
+        ifn!("llvm.maxnum.f16", fn(t_f16, t_f16) -> t_f16);
         ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
         ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.maxnum.f128", fn(t_f128, t_f128) -> t_f128);
 
+        ifn!("llvm.floor.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.floor.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.ceil.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.ceil.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.trunc.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.trunc.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.copysign.f16", fn(t_f16, t_f16) -> t_f16);
         ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
         ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
+        ifn!("llvm.copysign.f128", fn(t_f128, t_f128) -> t_f128);
 
+        ifn!("llvm.round.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.round.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.roundeven.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.roundeven.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.roundeven.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.roundeven.f128", fn(t_f128) -> t_f128);
 
+        ifn!("llvm.rint.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.rint.f128", fn(t_f128) -> t_f128);
+
+        ifn!("llvm.nearbyint.f16", fn(t_f16) -> t_f16);
         ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
         ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
+        ifn!("llvm.nearbyint.f128", fn(t_f128) -> t_f128);
 
         ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
         ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 6116a6fd222..c45787f35aa 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -357,31 +357,27 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) {
 
     let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics();
 
-    let eligible_def_ids: Vec<DefId> = tcx
-        .mir_keys(())
-        .iter()
-        .filter_map(|local_def_id| {
-            let def_id = local_def_id.to_def_id();
-            let kind = tcx.def_kind(def_id);
-            // `mir_keys` will give us `DefId`s for all kinds of things, not
-            // just "functions", like consts, statics, etc. Filter those out.
-            // If `ignore_unused_generics` was specified, filter out any
-            // generic functions from consideration as well.
-            if !matches!(kind, DefKind::Fn | DefKind::AssocFn | DefKind::Closure) {
-                return None;
-            }
-            if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) {
-                return None;
-            }
-            Some(local_def_id.to_def_id())
-        })
-        .collect();
+    let eligible_def_ids = tcx.mir_keys(()).iter().filter_map(|local_def_id| {
+        let def_id = local_def_id.to_def_id();
+        let kind = tcx.def_kind(def_id);
+        // `mir_keys` will give us `DefId`s for all kinds of things, not
+        // just "functions", like consts, statics, etc. Filter those out.
+        // If `ignore_unused_generics` was specified, filter out any
+        // generic functions from consideration as well.
+        if !matches!(kind, DefKind::Fn | DefKind::AssocFn | DefKind::Closure) {
+            return None;
+        }
+        if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) {
+            return None;
+        }
+        Some(local_def_id.to_def_id())
+    });
 
     let codegenned_def_ids = codegenned_and_inlined_items(tcx);
 
     // For each `DefId` that should have coverage instrumentation but wasn't
     // codegenned, add it to the function coverage map as an unused function.
-    for def_id in eligible_def_ids.into_iter().filter(|id| !codegenned_def_ids.contains(id)) {
+    for def_id in eligible_def_ids.filter(|id| !codegenned_def_ids.contains(id)) {
         // Skip any function that didn't have coverage data added to it by the
         // coverage instrumentor.
         let body = tcx.instance_mir(ty::InstanceDef::Item(def_id));
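The rewrite above drops an intermediate `Vec<DefId>`: the ids are consumed exactly once by the loop that follows, so the `filter_map` can stay lazy and feed straight into the final `filter`, avoiding the allocation. A standalone before/after of the same pattern:

```rust
fn main() {
    let keys = [1, 2, 3, 4, 5, 6];

    // Before: collect eagerly, then iterate the Vec once.
    let eligible: Vec<i32> =
        keys.iter().filter_map(|&k| (k % 2 == 0).then_some(k)).collect();
    for k in eligible.into_iter().filter(|&k| k != 4) {
        println!("eager: {k}");
    }

    // After: one lazy pipeline, no intermediate allocation.
    let eligible = keys.iter().filter_map(|&k| (k % 2 == 0).then_some(k));
    for k in eligible.filter(|&k| k != 4) {
        println!("lazy: {k}");
    }
}
```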
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 045b6d2b651..1a5f9b42947 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -695,7 +695,7 @@ impl MsvcBasicName for ty::UintTy {
 
 impl MsvcBasicName for ty::FloatTy {
     fn msvc_basic_name(self) -> &'static str {
-        // FIXME: f16 and f128 have no MSVE representation. We could improve the debuginfo.
+        // FIXME: f16 and f128 have no MSVC representation. We could improve the debuginfo.
         // See: <https://github.com/rust-lang/rust/pull/114607/files#r1454683264>
         match self {
             ty::FloatTy::F16 => "half",
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 1d4ab866cb3..f33a672aff0 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -28,51 +28,118 @@ fn get_simple_intrinsic<'ll>(
     name: Symbol,
 ) -> Option<(&'ll Type, &'ll Value)> {
     let llvm_name = match name {
+        sym::sqrtf16 => "llvm.sqrt.f16",
         sym::sqrtf32 => "llvm.sqrt.f32",
         sym::sqrtf64 => "llvm.sqrt.f64",
+        sym::sqrtf128 => "llvm.sqrt.f128",
+
+        sym::powif16 => "llvm.powi.f16",
         sym::powif32 => "llvm.powi.f32",
         sym::powif64 => "llvm.powi.f64",
+        sym::powif128 => "llvm.powi.f128",
+
+        sym::sinf16 => "llvm.sin.f16",
         sym::sinf32 => "llvm.sin.f32",
         sym::sinf64 => "llvm.sin.f64",
+        sym::sinf128 => "llvm.sin.f128",
+
+        sym::cosf16 => "llvm.cos.f16",
         sym::cosf32 => "llvm.cos.f32",
         sym::cosf64 => "llvm.cos.f64",
+        sym::cosf128 => "llvm.cos.f128",
+
+        sym::powf16 => "llvm.pow.f16",
         sym::powf32 => "llvm.pow.f32",
         sym::powf64 => "llvm.pow.f64",
+        sym::powf128 => "llvm.pow.f128",
+
+        sym::expf16 => "llvm.exp.f16",
         sym::expf32 => "llvm.exp.f32",
         sym::expf64 => "llvm.exp.f64",
+        sym::expf128 => "llvm.exp.f128",
+
+        sym::exp2f16 => "llvm.exp2.f16",
         sym::exp2f32 => "llvm.exp2.f32",
         sym::exp2f64 => "llvm.exp2.f64",
+        sym::exp2f128 => "llvm.exp2.f128",
+
+        sym::logf16 => "llvm.log.f16",
         sym::logf32 => "llvm.log.f32",
         sym::logf64 => "llvm.log.f64",
+        sym::logf128 => "llvm.log.f128",
+
+        sym::log10f16 => "llvm.log10.f16",
         sym::log10f32 => "llvm.log10.f32",
         sym::log10f64 => "llvm.log10.f64",
+        sym::log10f128 => "llvm.log10.f128",
+
+        sym::log2f16 => "llvm.log2.f16",
         sym::log2f32 => "llvm.log2.f32",
         sym::log2f64 => "llvm.log2.f64",
+        sym::log2f128 => "llvm.log2.f128",
+
+        sym::fmaf16 => "llvm.fma.f16",
         sym::fmaf32 => "llvm.fma.f32",
         sym::fmaf64 => "llvm.fma.f64",
+        sym::fmaf128 => "llvm.fma.f128",
+
+        sym::fabsf16 => "llvm.fabs.f16",
         sym::fabsf32 => "llvm.fabs.f32",
         sym::fabsf64 => "llvm.fabs.f64",
+        sym::fabsf128 => "llvm.fabs.f128",
+
+        sym::minnumf16 => "llvm.minnum.f16",
         sym::minnumf32 => "llvm.minnum.f32",
         sym::minnumf64 => "llvm.minnum.f64",
+        sym::minnumf128 => "llvm.minnum.f128",
+
+        sym::maxnumf16 => "llvm.maxnum.f16",
         sym::maxnumf32 => "llvm.maxnum.f32",
         sym::maxnumf64 => "llvm.maxnum.f64",
+        sym::maxnumf128 => "llvm.maxnum.f128",
+
+        sym::copysignf16 => "llvm.copysign.f16",
         sym::copysignf32 => "llvm.copysign.f32",
         sym::copysignf64 => "llvm.copysign.f64",
+        sym::copysignf128 => "llvm.copysign.f128",
+
+        sym::floorf16 => "llvm.floor.f16",
         sym::floorf32 => "llvm.floor.f32",
         sym::floorf64 => "llvm.floor.f64",
+        sym::floorf128 => "llvm.floor.f128",
+
+        sym::ceilf16 => "llvm.ceil.f16",
         sym::ceilf32 => "llvm.ceil.f32",
         sym::ceilf64 => "llvm.ceil.f64",
+        sym::ceilf128 => "llvm.ceil.f128",
+
+        sym::truncf16 => "llvm.trunc.f16",
         sym::truncf32 => "llvm.trunc.f32",
         sym::truncf64 => "llvm.trunc.f64",
+        sym::truncf128 => "llvm.trunc.f128",
+
+        sym::rintf16 => "llvm.rint.f16",
         sym::rintf32 => "llvm.rint.f32",
         sym::rintf64 => "llvm.rint.f64",
+        sym::rintf128 => "llvm.rint.f128",
+
+        sym::nearbyintf16 => "llvm.nearbyint.f16",
         sym::nearbyintf32 => "llvm.nearbyint.f32",
         sym::nearbyintf64 => "llvm.nearbyint.f64",
+        sym::nearbyintf128 => "llvm.nearbyint.f128",
+
+        sym::roundf16 => "llvm.round.f16",
         sym::roundf32 => "llvm.round.f32",
         sym::roundf64 => "llvm.round.f64",
+        sym::roundf128 => "llvm.round.f128",
+
         sym::ptr_mask => "llvm.ptrmask",
+
+        sym::roundevenf16 => "llvm.roundeven.f16",
         sym::roundevenf32 => "llvm.roundeven.f32",
         sym::roundevenf64 => "llvm.roundeven.f64",
+        sym::roundevenf128 => "llvm.roundeven.f128",
+
         _ => return None,
     };
     Some(cx.get_intrinsic(llvm_name))
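The table above leans on a small idiom: wrapping the whole `match` in `Some(...)` and using `return None` in the fallback arm, so only the non-simple intrinsics fall through to the handwritten lowering later in the function. A standalone miniature with strings standing in for `Symbol`:

```rust
// Returns the LLVM name for a "simple" intrinsic, or None so the caller
// can fall through to custom lowering. Only two families shown.
fn simple_intrinsic_name(name: &str) -> Option<&'static str> {
    Some(match name {
        "sqrtf16" => "llvm.sqrt.f16",
        "sqrtf32" => "llvm.sqrt.f32",
        "sqrtf64" => "llvm.sqrt.f64",
        "sqrtf128" => "llvm.sqrt.f128",
        "fmaf16" => "llvm.fma.f16",
        "fmaf32" => "llvm.fma.f32",
        "fmaf64" => "llvm.fma.f64",
        "fmaf128" => "llvm.fma.f128",
        _ => return None,
    })
}

fn main() {
    assert_eq!(simple_intrinsic_name("sqrtf128"), Some("llvm.sqrt.f128"));
    assert_eq!(simple_intrinsic_name("ptr_offset_from"), None);
}
```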
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 3c20b579349..952f41fbd90 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -480,6 +480,9 @@ pub struct SanitizerOptions {
     pub sanitize_address: bool,
     pub sanitize_address_recover: bool,
     pub sanitize_cfi: bool,
+    pub sanitize_dataflow: bool,
+    pub sanitize_dataflow_abilist: *const *const c_char,
+    pub sanitize_dataflow_abilist_len: size_t,
     pub sanitize_kcfi: bool,
     pub sanitize_memory: bool,
     pub sanitize_memory_recover: bool,
@@ -1303,13 +1306,6 @@ extern "C" {
         NumIndices: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildStructGEP2<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Pointer: &'a Value,
-        Idx: c_uint,
-        Name: *const c_char,
-    ) -> &'a Value;
 
     // Casts
     pub fn LLVMBuildTrunc<'a>(
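On the FFI side, the ABI list crosses into C++ as a raw (pointer, length) pair inside a `#[repr(C)]` struct, so field order and types must match the C++ definition exactly, and the struct does not own the strings it points at. A minimal sketch of just the new fields (standalone and abridged; `usize` stands in for `size_t`, which matches on all supported targets):

```rust
use std::os::raw::c_char;

// Field order and types must mirror the C++ struct; the pointer table is
// borrowed from the Vec<CString> built in back/write.rs above.
#[repr(C)]
pub struct DataflowOptionsSketch {
    pub sanitize_dataflow: bool,
    pub sanitize_dataflow_abilist: *const *const c_char,
    pub sanitize_dataflow_abilist_len: usize,
}
```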
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 1b2beac56a2..75a189aa87c 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -192,14 +192,11 @@ impl<'a> IntoIterator for LLVMFeature<'a> {
 // to LLVM or the feature detection code will walk past the end of the feature
 // array, leading to crashes.
 //
-// To find a list of LLVM's names, check llvm-project/llvm/include/llvm/Support/*TargetParser.def
-// where the * matches the architecture's name
-//
-// For targets not present in the above location, see llvm-project/llvm/lib/Target/{ARCH}/*.td
+// To find a list of LLVM's names, see llvm-project/llvm/lib/Target/{ARCH}/*.td
 // where `{ARCH}` is the architecture name. Look for instances of `SubtargetFeature`.
 //
-// Beware to not use the llvm github project for this, but check the git submodule
-// found in src/llvm-project
+// Check the current rustc fork of LLVM in the repo at https://github.com/rust-lang/llvm-project/.
+// The commit in use can be found via the `llvm-project` submodule in https://github.com/rust-lang/rust/tree/master/src
 // Though note that Rust can also be built with an external precompiled version of LLVM
 // which might lead to failures if the oldest tested / supported LLVM version
 // doesn't yet support the relevant intrinsics
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 8f180bab3df..07a4861ed73 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -261,9 +261,6 @@ impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
         layout.is_llvm_scalar_pair()
     }
-    fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
-        layout.llvm_field_index(self, index)
-    }
     fn scalar_pair_element_backend_type(
         &self,
         layout: TyAndLayout<'tcx>,
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 369dcfb7b8f..587adefe1d2 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -174,7 +174,6 @@ pub trait LayoutLlvmExt<'tcx> {
         index: usize,
         immediate: bool,
     ) -> &'a Type;
-    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
     fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
 }
 
@@ -326,42 +325,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         self.scalar_llvm_type_at(cx, scalar)
     }
 
-    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
-        match self.abi {
-            Abi::Scalar(_) | Abi::ScalarPair(..) => {
-                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
-            }
-            _ => {}
-        }
-        match self.fields {
-            FieldsShape::Primitive | FieldsShape::Union(_) => {
-                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
-            }
-
-            FieldsShape::Array { .. } => index as u64,
-
-            FieldsShape::Arbitrary { .. } => {
-                let variant_index = match self.variants {
-                    Variants::Single { index } => Some(index),
-                    _ => None,
-                };
-
-                // Look up llvm field if indexes do not match memory order due to padding. If
-                // `field_remapping` is `None` no padding was used and the llvm field index
-                // matches the memory index.
-                match cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
-                    Some(TypeLowering { field_remapping: Some(ref remap), .. }) => {
-                        remap[index] as u64
-                    }
-                    Some(_) => self.fields.memory_index(index) as u64,
-                    None => {
-                        bug!("TyAndLayout::llvm_field_index({:?}): type info not found", self)
-                    }
-                }
-            }
-        }
-    }
-
     fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
         debug_assert!(self.is_sized());
 
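Taken together with the `struct_gep`/`backend_field_index` removals above, this deletion is possible because field addresses are now computed as byte offsets taken directly from the layout, so the remap table that tracked how inserted padding shifted LLVM struct-field indices has nothing left to translate. A standalone sketch of the byte-offset scheme:

```rust
// Field access as "base + byte offset from the layout": no LLVM struct
// type, no field-index remapping for padding.
struct LayoutSketch {
    field_offsets: Vec<u64>, // byte offset of each field, in source order
}

fn field_addr(base: u64, layout: &LayoutSketch, index: usize) -> u64 {
    // Stand-in for `bx.inbounds_ptradd(base, cx.const_usize(offset))`.
    base + layout.field_offsets[index]
}

fn main() {
    let layout = LayoutSketch { field_offsets: vec![0, 8, 12] };
    assert_eq!(field_addr(0x1000, &layout, 2), 0x100c);
}
```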
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 172c66a7af1..b406a04af74 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -44,12 +44,12 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
 
     let aligned_size = size.align_to(slot_size).bytes() as i32;
     let full_direct_size = bx.cx().const_i32(aligned_size);
-    let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]);
+    let next = bx.inbounds_ptradd(addr, full_direct_size);
     bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
-        let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
+        let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
         (adjusted, addr_align)
     } else {
         (addr, addr_align)
@@ -89,11 +89,31 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     list: OperandRef<'tcx, &'ll Value>,
     target_ty: Ty<'tcx>,
 ) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
     // Implementation of the AAPCS64 calling convention for va_args; see
     // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+    //
+    // typedef struct va_list {
+    //     void * stack; // next stack param
+    //     void * gr_top; // end of GP arg reg save area
+    //     void * vr_top; // end of FP/SIMD arg reg save area
+    //     int gr_offs; // offset from gr_top to next GP register arg
+    //     int vr_offs; // offset from vr_top to next FP/SIMD register arg
+    // } va_list;
     let va_list_addr = list.immediate();
-    let va_list_layout = list.deref(bx.cx).layout;
-    let va_list_ty = va_list_layout.llvm_type(bx);
+
+    // There is no padding between fields since `void*` is size=8 align=8, `int` is size=4 align=4.
+    // See https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
+    // Table 1, Byte size and byte alignment of fundamental data types
+    // Table 3, Mapping of C & C++ built-in data types
+    let ptr_offset = 8;
+    let i32_offset = 4;
+    let gr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(ptr_offset));
+    let vr_top = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * ptr_offset));
+    let gr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset));
+    let vr_offs = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(3 * ptr_offset + i32_offset));
+
     let layout = bx.cx.layout_of(target_ty);
 
     let maybe_reg = bx.append_sibling_block("va_arg.maybe_reg");
@@ -104,16 +124,12 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     let offset_align = Align::from_bytes(4).unwrap();
 
     let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
-    let (reg_off, reg_top_index, slot_size) = if gr_type {
-        let gr_offs =
-            bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
+    let (reg_off, reg_top, slot_size) = if gr_type {
         let nreg = (layout.size.bytes() + 7) / 8;
-        (gr_offs, va_list_layout.llvm_field_index(bx.cx, 1), nreg * 8)
+        (gr_offs, gr_top, nreg * 8)
     } else {
-        let vr_off =
-            bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 4));
         let nreg = (layout.size.bytes() + 15) / 16;
-        (vr_off, va_list_layout.llvm_field_index(bx.cx, 2), nreg * 16)
+        (vr_offs, vr_top, nreg * 16)
     };
 
     // if the offset >= 0 then the value will be on the stack
@@ -141,15 +157,14 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
 
     bx.switch_to_block(in_reg);
     let top_type = bx.type_ptr();
-    let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
-    let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
+    let top = bx.load(top_type, reg_top, dl.pointer_align.abi);
 
     // reg_value = *(@top + reg_off_v);
-    let mut reg_addr = bx.gep(bx.type_i8(), top, &[reg_off_v]);
+    let mut reg_addr = bx.ptradd(top, reg_off_v);
     if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
         // On big-endian systems the value is right-aligned in its slot.
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
-        reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
+        reg_addr = bx.ptradd(reg_addr, offset);
     }
     let reg_type = layout.llvm_type(bx);
     let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
@@ -173,11 +188,29 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     list: OperandRef<'tcx, &'ll Value>,
     target_ty: Ty<'tcx>,
 ) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
     // Implementation of the s390x ELF ABI calling convention for va_args; see
     // https://github.com/IBM/s390x-abi (chapter 1.2.4)
+    //
+    // typedef struct __va_list_tag {
+    //     long __gpr;
+    //     long __fpr;
+    //     void *__overflow_arg_area;
+    //     void *__reg_save_area;
+    // } va_list[1];
     let va_list_addr = list.immediate();
-    let va_list_layout = list.deref(bx.cx).layout;
-    let va_list_ty = va_list_layout.llvm_type(bx);
+
+    // There is no padding between fields since `long` and `void*` both have size=8 align=8.
+    // https://github.com/IBM/s390x-abi (Table 1.1.: Scalar types)
+    let i64_offset = 8;
+    let ptr_offset = 8;
+    let gpr = va_list_addr;
+    let fpr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(i64_offset));
+    let overflow_arg_area = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset));
+    let reg_save_area =
+        bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(2 * i64_offset + ptr_offset));
+
     let layout = bx.cx.layout_of(target_ty);
 
     let in_reg = bx.append_sibling_block("va_arg.in_reg");
@@ -192,15 +225,10 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     let padding = padded_size - unpadded_size;
 
     let gpr_type = indirect || !layout.is_single_fp_element(bx.cx);
-    let (max_regs, reg_count_field, reg_save_index, reg_padding) =
-        if gpr_type { (5, 0, 2, padding) } else { (4, 1, 16, 0) };
+    let (max_regs, reg_count, reg_save_index, reg_padding) =
+        if gpr_type { (5, gpr, 2, padding) } else { (4, fpr, 16, 0) };
 
     // Check whether the value was passed in a register or in memory.
-    let reg_count = bx.struct_gep(
-        va_list_ty,
-        va_list_addr,
-        va_list_layout.llvm_field_index(bx.cx, reg_count_field),
-    );
     let reg_count_v = bx.load(bx.type_i64(), reg_count, Align::from_bytes(8).unwrap());
     let use_regs = bx.icmp(IntPredicate::IntULT, reg_count_v, bx.const_u64(max_regs));
     bx.cond_br(use_regs, in_reg, in_mem);
@@ -209,12 +237,10 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_reg);
 
     // Work out the address of the value in the register save area.
-    let reg_ptr =
-        bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
-    let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_save_area, dl.pointer_align.abi);
     let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
     let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
-    let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
+    let reg_addr = bx.ptradd(reg_ptr_v, reg_off);
 
     // Update the register count.
     let new_reg_count_v = bx.add(reg_count_v, bx.const_u64(1));
@@ -225,27 +251,23 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     bx.switch_to_block(in_mem);
 
     // Work out the address of the value in the argument overflow area.
-    let arg_ptr =
-        bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
-    let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let arg_ptr_v =
+        bx.load(bx.type_ptr(), overflow_arg_area, bx.tcx().data_layout.pointer_align.abi);
     let arg_off = bx.const_u64(padding);
-    let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
+    let mem_addr = bx.ptradd(arg_ptr_v, arg_off);
 
     // Update the argument overflow area pointer.
     let arg_size = bx.cx().const_u64(padded_size);
-    let new_arg_ptr_v = bx.inbounds_gep(bx.type_i8(), arg_ptr_v, &[arg_size]);
-    bx.store(new_arg_ptr_v, arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let new_arg_ptr_v = bx.inbounds_ptradd(arg_ptr_v, arg_size);
+    bx.store(new_arg_ptr_v, overflow_arg_area, dl.pointer_align.abi);
     bx.br(end);
 
     // Return the appropriate result.
     bx.switch_to_block(end);
     let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
-    let val_addr = if indirect {
-        bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi)
-    } else {
-        val_addr
-    };
+    let val_addr =
+        if indirect { bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) } else { val_addr };
     bx.load(val_type, val_addr, layout.align.abi)
 }