Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/abi.rs                               |  69
 compiler/rustc_codegen_llvm/src/builder.rs                           |  19
 compiler/rustc_codegen_llvm/src/consts.rs                            |  12
 compiler/rustc_codegen_llvm/src/context.rs                           |   1
 compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs |  14
 compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs   |   2
 compiler/rustc_codegen_llvm/src/debuginfo/mod.rs                     |  10
 compiler/rustc_codegen_llvm/src/intrinsic.rs                         | 118
 compiler/rustc_codegen_llvm/src/llvm_util.rs                         |   8
 compiler/rustc_codegen_llvm/src/va_arg.rs                            | 185
 10 files changed, 312 insertions(+), 126 deletions(-)
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index c87e70864e5..119cd634f98 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -2,7 +2,10 @@ use std::borrow::Borrow;
 use std::cmp;
 
 use libc::c_uint;
-use rustc_abi::{BackendRepr, HasDataLayout, Primitive, Reg, RegKind, Size};
+use rustc_abi::{
+    ArmCall, BackendRepr, CanonAbi, HasDataLayout, InterruptKind, Primitive, Reg, RegKind, Size,
+    X86Call,
+};
 use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
@@ -12,7 +15,7 @@ use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::{bug, ty};
 use rustc_session::config;
 use rustc_target::callconv::{
-    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, Conv, FnAbi, PassMode,
+    ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, CastTarget, FnAbi, PassMode,
 };
 use rustc_target::spec::SanitizerSet;
 use smallvec::SmallVec;
@@ -409,11 +412,17 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         if !self.can_unwind {
             func_attrs.push(llvm::AttributeKind::NoUnwind.create_attr(cx.llcx));
         }
-        if let Conv::RiscvInterrupt { kind } = self.conv {
-            func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", kind.as_str()));
-        }
-        if let Conv::CCmseNonSecureEntry = self.conv {
-            func_attrs.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"))
+        match self.conv {
+            CanonAbi::Interrupt(InterruptKind::RiscvMachine) => {
+                func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", "machine"))
+            }
+            CanonAbi::Interrupt(InterruptKind::RiscvSupervisor) => {
+                func_attrs.push(llvm::CreateAttrStringValue(cx.llcx, "interrupt", "supervisor"))
+            }
+            CanonAbi::Arm(ArmCall::CCmseNonSecureEntry) => {
+                func_attrs.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry"))
+            }
+            _ => (),
         }
         attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &{ func_attrs });
 
@@ -600,7 +609,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             llvm::SetInstructionCallConv(callsite, cconv);
         }
 
-        if self.conv == Conv::CCmseNonSecureCall {
+        if self.conv == CanonAbi::Arm(ArmCall::CCmseNonSecureCall) {
             // This will probably get ignored on all targets but those supporting the TrustZone-M
             // extension (thumbv8m targets).
             let cmse_nonsecure_call = llvm::CreateAttrString(bx.cx.llcx, "cmse_nonsecure_call");
@@ -636,17 +645,11 @@ impl AbiBuilderMethods for Builder<'_, '_, '_> {
 }
 
 impl llvm::CallConv {
-    pub(crate) fn from_conv(conv: Conv, arch: &str) -> Self {
+    pub(crate) fn from_conv(conv: CanonAbi, arch: &str) -> Self {
         match conv {
-            Conv::C
-            | Conv::Rust
-            | Conv::CCmseNonSecureCall
-            | Conv::CCmseNonSecureEntry
-            | Conv::RiscvInterrupt { .. } => llvm::CCallConv,
-            Conv::Cold => llvm::ColdCallConv,
-            Conv::PreserveMost => llvm::PreserveMost,
-            Conv::PreserveAll => llvm::PreserveAll,
-            Conv::GpuKernel => {
+            CanonAbi::C | CanonAbi::Rust => llvm::CCallConv,
+            CanonAbi::RustCold => llvm::PreserveMost,
+            CanonAbi::GpuKernel => {
                 if arch == "amdgpu" {
                     llvm::AmdgpuKernel
                 } else if arch == "nvptx64" {
@@ -655,17 +658,25 @@ impl llvm::CallConv {
                     panic!("Architecture {arch} does not support GpuKernel calling convention");
                 }
             }
-            Conv::AvrInterrupt => llvm::AvrInterrupt,
-            Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
-            Conv::ArmAapcs => llvm::ArmAapcsCallConv,
-            Conv::Msp430Intr => llvm::Msp430Intr,
-            Conv::X86Fastcall => llvm::X86FastcallCallConv,
-            Conv::X86Intr => llvm::X86_Intr,
-            Conv::X86Stdcall => llvm::X86StdcallCallConv,
-            Conv::X86ThisCall => llvm::X86_ThisCall,
-            Conv::X86VectorCall => llvm::X86_VectorCall,
-            Conv::X86_64SysV => llvm::X86_64_SysV,
-            Conv::X86_64Win64 => llvm::X86_64_Win64,
+            CanonAbi::Interrupt(interrupt_kind) => match interrupt_kind {
+                InterruptKind::Avr => llvm::AvrInterrupt,
+                InterruptKind::AvrNonBlocking => llvm::AvrNonBlockingInterrupt,
+                InterruptKind::Msp430 => llvm::Msp430Intr,
+                InterruptKind::RiscvMachine | InterruptKind::RiscvSupervisor => llvm::CCallConv,
+                InterruptKind::X86 => llvm::X86_Intr,
+            },
+            CanonAbi::Arm(arm_call) => match arm_call {
+                ArmCall::Aapcs => llvm::ArmAapcsCallConv,
+                ArmCall::CCmseNonSecureCall | ArmCall::CCmseNonSecureEntry => llvm::CCallConv,
+            },
+            CanonAbi::X86(x86_call) => match x86_call {
+                X86Call::Fastcall => llvm::X86FastcallCallConv,
+                X86Call::Stdcall => llvm::X86StdcallCallConv,
+                X86Call::SysV64 => llvm::X86_64_SysV,
+                X86Call::Thiscall => llvm::X86_ThisCall,
+                X86Call::Vectorcall => llvm::X86_VectorCall,
+                X86Call::Win64 => llvm::X86_64_Win64,
+            },
         }
     }
 }
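
Note: the abi.rs change replaces rustc_target's flat `Conv` enum with `CanonAbi` from `rustc_abi`, which groups target-specific conventions under `Arm(..)`, `Interrupt(..)`, and `X86(..)` so each family can be matched exhaustively in one place. A minimal self-contained sketch of that shape (variant lists follow this diff; the derives and helper function are illustrative, not rustc API):

    #![allow(dead_code)]

    #[derive(Clone, Copy)]
    enum ArmCall { Aapcs, CCmseNonSecureCall, CCmseNonSecureEntry }

    #[derive(Clone, Copy)]
    enum InterruptKind { Avr, AvrNonBlocking, Msp430, RiscvMachine, RiscvSupervisor, X86 }

    #[derive(Clone, Copy)]
    enum X86Call { Fastcall, Stdcall, SysV64, Thiscall, Vectorcall, Win64 }

    #[derive(Clone, Copy)]
    enum CanonAbi {
        C,
        Rust,
        RustCold,
        GpuKernel,
        Arm(ArmCall),
        Interrupt(InterruptKind),
        X86(X86Call),
    }

    // Nested or-patterns keep the "everything else is plain C" cases from
    // the from_conv match above readable and exhaustive.
    fn uses_c_call_conv(abi: CanonAbi) -> bool {
        matches!(
            abi,
            CanonAbi::C
                | CanonAbi::Rust
                | CanonAbi::Arm(ArmCall::CCmseNonSecureCall | ArmCall::CCmseNonSecureEntry)
                | CanonAbi::Interrupt(InterruptKind::RiscvMachine | InterruptKind::RiscvSupervisor)
        )
    }

    fn main() {
        assert!(uses_c_call_conv(CanonAbi::Arm(ArmCall::CCmseNonSecureEntry)));
        assert!(!uses_c_call_conv(CanonAbi::X86(X86Call::Win64)));
    }

The payoff of the nesting is that adding, say, a new `X86Call` variant is a compile error only in the matches that actually handle x86 conventions.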
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index fcb55a04635..ec006b59192 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -1452,9 +1452,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> {
     fn get_static(&mut self, def_id: DefId) -> &'ll Value {
         // Forward to the `get_static` method of `CodegenCx`
-        let s = self.cx().get_static(def_id);
-        // Cast to default address space if globals are in a different addrspace
-        self.cx().const_pointercast(s, self.type_ptr())
+        let global = self.cx().get_static(def_id);
+        if self.cx().tcx.is_thread_local_static(def_id) {
+            let pointer = self.call_intrinsic("llvm.threadlocal.address", &[global]);
+            // Cast to default address space if globals are in a different addrspace
+            self.pointercast(pointer, self.type_ptr())
+        } else {
+            // Cast to default address space if globals are in a different addrspace
+            self.cx().const_pointercast(global, self.type_ptr())
+        }
     }
 }
 
@@ -1809,8 +1815,11 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
             let dbg_loc = self.get_dbg_loc();
 
-            // Test whether the function pointer is associated with the type identifier.
-            let cond = self.type_test(llfn, typeid_metadata);
+            // Test whether the function pointer is associated with the type identifier using the
+            // llvm.type.test intrinsic. The LowerTypeTests link-time optimization pass replaces
+            // calls to this intrinsic with code to test type membership.
+            let typeid = self.get_metadata_value(typeid_metadata);
+            let cond = self.call_intrinsic("llvm.type.test", &[llfn, typeid]);
             let bb_pass = self.append_sibling_block("type_test.pass");
             let bb_fail = self.append_sibling_block("type_test.fail");
             self.cond_br(cond, bb_pass, bb_fail);
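
Note: the builder.rs change routes thread-local statics through LLVM's `llvm.threadlocal.address` intrinsic (declared in context.rs below) rather than taking the global's address directly, which keeps the TLS address computation explicit for LLVM's optimizers. A hedged illustration of source that exercises thread-local storage; whether a given access reaches this `get_static` path depends on the platform and on `thread_local!` internals:

    use std::cell::Cell;

    thread_local! {
        static COUNTER: Cell<u32> = Cell::new(0);
    }

    fn bump() -> u32 {
        // Accesses to the underlying thread-local global are now emitted
        // roughly as
        //   %addr = call ptr @llvm.threadlocal.address.p0(ptr @COUNTER)
        // instead of using @COUNTER directly (IR shown approximately).
        COUNTER.with(|c| {
            c.set(c.get() + 1);
            c.get()
        })
    }

    fn main() {
        assert_eq!(bump(), 1);
    }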
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 4234352c93a..73def2711dc 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -541,12 +541,12 @@ impl<'ll> CodegenCx<'ll, '_> {
                 // in the handling of `.init_array` (the static constructor list) in versions of
                 // the gold linker (prior to the one released with binutils 2.36).
                 //
-                // That said, we only ever emit these when compiling for ELF targets, unless
-                // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
-                // on other targets, in particular MachO targets have *their* static constructor
-                // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
-                // that check happens when assigning the `CodegenFnAttrFlags` in
-                // `rustc_hir_analysis`, so we don't need to take care of it here.
+                // That said, we only ever emit these when `#[used(compiler)]` is explicitly
+                // requested. This is to avoid similar breakage on other targets, in particular
+                // MachO targets have *their* static constructor lists broken if `llvm.compiler.used`
+                // is emitted rather than `llvm.used`. However, that check happens when assigning
+                // the `CodegenFnAttrFlags` in the `codegen_fn_attrs` query, so we don't need to
+                // take care of it here.
                 self.add_compiler_used_global(g);
             }
             if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
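
For reference, these are the attribute forms the rewritten comment distinguishes. The argument-taking variants belong to the unstable `used_with_arg` feature, so this sketch assumes a nightly compiler:

    #![feature(used_with_arg)]

    // Lowered via `llvm.used`: neither the compiler nor the linker may
    // strip the symbol.
    #[used(linker)]
    static KEEP_FOR_LINKER: [u8; 4] = *b"keep";

    // Lowered via `llvm.compiler.used`: only the compiler is barred from
    // dropping it; the linker may still strip it. This is the case the
    // comment above is about.
    #[used(compiler)]
    static KEEP_FOR_COMPILER: [u8; 4] = *b"keep";

    fn main() {}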
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 8cc2cb9c333..8d6e1d8941b 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -1243,6 +1243,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
 
         ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);
+        ifn!("llvm.threadlocal.address", fn(ptr) -> ptr);
 
         None
     }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
index e9574108696..a5c80895741 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs
@@ -1,7 +1,7 @@
 use std::borrow::Cow;
 
 use libc::c_uint;
-use rustc_abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants};
+use rustc_abi::{Align, Endian, FieldIdx, Size, TagEncoding, VariantIdx, Variants};
 use rustc_codegen_ssa::debuginfo::type_names::compute_debuginfo_type_name;
 use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo};
 use rustc_codegen_ssa::traits::{ConstCodegenMethods, MiscCodegenMethods};
@@ -401,7 +401,7 @@ fn build_union_fields_for_enum<'ll, 'tcx>(
     enum_type_and_layout: TyAndLayout<'tcx>,
     enum_type_di_node: &'ll DIType,
     variant_indices: impl Iterator<Item = VariantIdx> + Clone,
-    tag_field: usize,
+    tag_field: FieldIdx,
     untagged_variant_index: Option<VariantIdx>,
 ) -> SmallVec<&'ll DIType> {
     let tag_base_type = tag_base_type(cx.tcx, enum_type_and_layout);
@@ -805,7 +805,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
     variant_field_infos: &[VariantFieldInfo<'ll>],
     discr_type_di_node: &'ll DIType,
     tag_base_type: Ty<'tcx>,
-    tag_field: usize,
+    tag_field: FieldIdx,
     untagged_variant_index: Option<VariantIdx>,
     di_flags: DIFlags,
 ) -> SmallVec<&'ll DIType> {
@@ -858,7 +858,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
     }));
 
     assert_eq!(
-        cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
+        cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field.as_usize()).ty),
         cx.size_and_align_of(self::tag_base_type(cx.tcx, enum_type_and_layout))
     );
 
@@ -875,7 +875,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
             Endian::Big => (8, 0),
         };
 
-        let tag_field_offset = enum_type_and_layout.fields.offset(tag_field).bytes();
+        let tag_field_offset = enum_type_and_layout.fields.offset(tag_field.as_usize()).bytes();
         let lo_offset = Size::from_bytes(tag_field_offset + lo_offset);
         let hi_offset = Size::from_bytes(tag_field_offset + hi_offset);
 
@@ -905,8 +905,8 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
             cx,
             enum_type_di_node,
             TAG_FIELD_NAME,
-            enum_type_and_layout.field(cx, tag_field),
-            enum_type_and_layout.fields.offset(tag_field),
+            enum_type_and_layout.field(cx, tag_field.as_usize()),
+            enum_type_and_layout.fields.offset(tag_field.as_usize()),
             di_flags,
             tag_base_type_di_node,
             None,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index 20a841f2287..62d38d463ab 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -373,7 +373,7 @@ fn build_discr_member_di_node<'ll, 'tcx>(
                 file,
                 UNKNOWN_LINE_NUMBER,
                 layout,
-                enum_or_coroutine_type_and_layout.fields.offset(tag_field),
+                enum_or_coroutine_type_and_layout.fields.offset(tag_field.as_usize()),
                 DIFlags::FlagArtificial,
                 ty,
             ))
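
Note: switching `tag_field` from `usize` to `FieldIdx` threads rustc's newtype-index pattern through the enum debuginfo code, and the `.as_usize()` calls above mark exactly where the typed index meets APIs that still want a raw offset. A hand-rolled sketch of the pattern (the real `FieldIdx` is generated by `rustc_index` macros):

    #[derive(Clone, Copy, PartialEq, Eq, Debug)]
    struct FieldIdx(u32);

    impl FieldIdx {
        fn as_usize(self) -> usize {
            self.0 as usize
        }
    }

    // A function taking FieldIdx can no longer be handed an arbitrary
    // integer, or an index from a different index space such as a variant
    // index; ruling out that mix-up is the point of the newtype.
    fn field_offset(offsets: &[u64], field: FieldIdx) -> u64 {
        offsets[field.as_usize()]
    }

    fn main() {
        assert_eq!(field_offset(&[0, 4, 8], FieldIdx(2)), 8);
    }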
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index c5085927923..5ca2505cec4 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -147,6 +147,12 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
     }
 }
 
+impl<'ll> Builder<'_, 'll, '_> {
+    pub(crate) fn get_dbg_loc(&self) -> Option<&'ll DILocation> {
+        unsafe { llvm::LLVMGetCurrentDebugLocation2(self.llbuilder) }
+    }
+}
+
 impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
     // FIXME(eddyb) find a common convention for all of the debuginfo-related
     // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
@@ -209,10 +215,6 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> {
         }
     }
 
-    fn get_dbg_loc(&self) -> Option<&'ll DILocation> {
-        unsafe { llvm::LLVMGetCurrentDebugLocation2(self.llbuilder) }
-    }
-
     fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
         gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
     }
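
Note: moving `get_dbg_loc` out of `DebugInfoBuilderMethods` and into an inherent impl keeps every in-crate `self.get_dbg_loc()` call site compiling while removing the method from the backend-agnostic trait surface. A toy illustration of that refactor (stand-in types, not the real Builder):

    struct Builder;

    trait DebugInfoBuilderMethods {
        // get_dbg_loc used to be declared here ...
    }

    impl DebugInfoBuilderMethods for Builder {}

    impl Builder {
        // ... and now lives here, visible only to this backend.
        fn get_dbg_loc(&self) -> Option<u32> {
            None
        }
    }

    fn main() {
        let b = Builder;
        assert!(b.get_dbg_loc().is_none());
    }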
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index e8629aeebb9..10697b9a71f 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -169,19 +169,9 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         span: Span,
     ) -> Result<(), ty::Instance<'tcx>> {
         let tcx = self.tcx;
-        let callee_ty = instance.ty(tcx, self.typing_env());
 
-        let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else {
-            bug!("expected fn item type, found {}", callee_ty);
-        };
-
-        let sig = callee_ty.fn_sig(tcx);
-        let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig);
-        let arg_tys = sig.inputs();
-        let ret_ty = sig.output();
-        let name = tcx.item_name(def_id);
-
-        let llret_ty = self.layout_of(ret_ty).llvm_type(self);
+        let name = tcx.item_name(instance.def_id());
+        let fn_args = instance.args;
 
         let simple = get_simple_intrinsic(self, name);
         let llval = match name {
@@ -265,22 +255,22 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
-                                if self.cx().size_of(ret_ty).bytes() < 4 {
+                                if self.cx().size_of(result.layout.ty).bytes() < 4 {
                                     // `va_arg` should not be called on an integer type
                                     // less than 4 bytes in length. If it is, promote
                                     // the integer to an `i32` and truncate the result
                                     // back to the smaller type.
                                     let promoted_result = emit_va_arg(self, args[0], tcx.types.i32);
-                                    self.trunc(promoted_result, llret_ty)
+                                    self.trunc(promoted_result, result.layout.llvm_type(self))
                                 } else {
-                                    emit_va_arg(self, args[0], ret_ty)
+                                    emit_va_arg(self, args[0], result.layout.ty)
                                 }
                             }
                             Primitive::Float(Float::F16) => {
                                 bug!("the va_arg intrinsic does not work with `f16`")
                             }
                             Primitive::Float(Float::F64) | Primitive::Pointer(_) => {
-                                emit_va_arg(self, args[0], ret_ty)
+                                emit_va_arg(self, args[0], result.layout.ty)
                             }
                             // `va_arg` should never be used with the return type f32.
                             Primitive::Float(Float::F32) => {
@@ -384,7 +374,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             | sym::rotate_right
             | sym::saturating_add
             | sym::saturating_sub => {
-                let ty = arg_tys[0];
+                let ty = args[0].layout.ty;
                 if !ty.is_integral() {
                     tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
                         span,
@@ -403,26 +393,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                             &[args[0].immediate(), y],
                         );
 
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::ctlz_nonzero => {
                         let y = self.const_bool(true);
                         let llvm_name = &format!("llvm.ctlz.i{width}");
                         let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::cttz_nonzero => {
                         let y = self.const_bool(true);
                         let llvm_name = &format!("llvm.cttz.i{width}");
                         let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]);
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::ctpop => {
                         let ret = self.call_intrinsic(
                             &format!("llvm.ctpop.i{width}"),
                             &[args[0].immediate()],
                         );
-                        self.intcast(ret, llret_ty, false)
+                        self.intcast(ret, result.layout.llvm_type(self), false)
                     }
                     sym::bswap => {
                         if width == 8 {
@@ -554,16 +544,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 // Unpack non-power-of-2 #[repr(packed, simd)] arguments.
                 // This gives them the expected layout of a regular #[repr(simd)] vector.
                 let mut loaded_args = Vec::new();
-                for (ty, arg) in arg_tys.iter().zip(args) {
+                for arg in args {
                     loaded_args.push(
                         // #[repr(packed, simd)] vectors are passed like arrays (as references,
                         // with reduced alignment and no padding) rather than as immediates.
                         // We can use a vector load to fix the layout and turn the argument
                         // into an immediate.
-                        if ty.is_simd()
+                        if arg.layout.ty.is_simd()
                             && let OperandValue::Ref(place) = arg.val
                         {
-                            let (size, elem_ty) = ty.simd_size_and_type(self.tcx());
+                            let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx());
                             let elem_ll_ty = match elem_ty.kind() {
                                 ty::Float(f) => self.type_float_from_ty(*f),
                                 ty::Int(i) => self.type_int_from_ty(*i),
@@ -580,10 +570,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     );
                 }
 
-                let llret_ty = if ret_ty.is_simd()
-                    && let BackendRepr::Memory { .. } = self.layout_of(ret_ty).layout.backend_repr
+                let llret_ty = if result.layout.ty.is_simd()
+                    && let BackendRepr::Memory { .. } = result.layout.backend_repr
                 {
-                    let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
+                    let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx());
                     let elem_ll_ty = match elem_ty.kind() {
                         ty::Float(f) => self.type_float_from_ty(*f),
                         ty::Int(i) => self.type_int_from_ty(*i),
@@ -593,16 +583,15 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     };
                     self.type_vector(elem_ll_ty, size)
                 } else {
-                    llret_ty
+                    result.layout.llvm_type(self)
                 };
 
                 match generic_simd_intrinsic(
                     self,
                     name,
-                    callee_ty,
                     fn_args,
                     &loaded_args,
-                    ret_ty,
+                    result.layout.ty,
                     llret_ty,
                     span,
                 ) {
@@ -621,9 +610,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         };
 
         if result.layout.ty.is_bool() {
-            OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
-                .val
-                .store(self, result);
+            let val = self.from_immediate(llval);
+            self.store_to_place(val, result.val);
         } else if !result.layout.ty.is_unit() {
             self.store_to_place(llval, result.val);
         }
@@ -648,13 +636,6 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }
     }
 
-    fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value {
-        // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
-        // optimization pass replaces calls to this intrinsic with code to test type membership.
-        let typeid = self.get_metadata_value(typeid);
-        self.call_intrinsic("llvm.type.test", &[pointer, typeid])
-    }
-
     fn type_checked_load(
         &mut self,
         llvtable: &'ll Value,
@@ -1151,7 +1132,6 @@ fn get_rust_try_fn<'a, 'll, 'tcx>(
 fn generic_simd_intrinsic<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     name: Symbol,
-    callee_ty: Ty<'tcx>,
     fn_args: GenericArgsRef<'tcx>,
     args: &[OperandRef<'tcx, &'ll Value>],
     ret_ty: Ty<'tcx>,
@@ -1222,26 +1202,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len))
     }
 
-    let tcx = bx.tcx();
-    let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx));
-    let arg_tys = sig.inputs();
-
     // Sanity-check: all vector arguments must be immediates.
     if cfg!(debug_assertions) {
-        for (ty, arg) in arg_tys.iter().zip(args) {
-            if ty.is_simd() {
+        for arg in args {
+            if arg.layout.ty.is_simd() {
                 assert_matches!(arg.val, OperandValue::Immediate(_));
             }
         }
     }
 
     if name == sym::simd_select_bitmask {
-        let (len, _) = require_simd!(arg_tys[1], SimdArgument);
+        let (len, _) = require_simd!(args[1].layout.ty, SimdArgument);
 
         let expected_int_bits = len.max(8).next_power_of_two();
         let expected_bytes = len.div_ceil(8);
 
-        let mask_ty = arg_tys[0];
+        let mask_ty = args[0].layout.ty;
         let mask = match mask_ty.kind() {
             ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
             ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(),
@@ -1275,8 +1251,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     }
 
     // every intrinsic below takes a SIMD vector as its first argument
-    let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput);
-    let in_ty = arg_tys[0];
+    let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput);
+    let in_ty = args[0].layout.ty;
 
     let comparison = match name {
         sym::simd_eq => Some(BinOp::Eq),
@@ -1407,13 +1383,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
     if name == sym::simd_insert || name == sym::simd_insert_dyn {
         require!(
-            in_elem == arg_tys[2],
+            in_elem == args[2].layout.ty,
             InvalidMonomorphization::InsertedType {
                 span,
                 name,
                 in_elem,
                 in_ty,
-                out_ty: arg_tys[2]
+                out_ty: args[2].layout.ty
             }
         );
 
@@ -1464,7 +1440,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_select {
         let m_elem_ty = in_elem;
         let m_len = in_len;
-        let (v_len, _) = require_simd!(arg_tys[1], SimdArgument);
+        let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument);
         require!(
             m_len == v_len,
             InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
@@ -1665,9 +1641,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
-        let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
+        let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
         // The element type of the third argument must be a signed integer type of any width:
-        let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+        let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
         require_simd!(ret_ty, SimdReturn);
 
         // Of the same length:
@@ -1678,7 +1654,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[1],
+                arg_ty: args[1].layout.ty,
                 out_len
             }
         );
@@ -1689,7 +1665,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[2],
+                arg_ty: args[2].layout.ty,
                 out_len: out_len2
             }
         );
@@ -1709,7 +1685,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 span,
                 name,
                 expected_element: element_ty1,
-                second_arg: arg_tys[1],
+                second_arg: args[1].layout.ty,
                 in_elem,
                 in_ty,
                 mutability: ExpectedPointerMutability::Not,
@@ -1770,10 +1746,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (mask_len, mask_elem) = (in_len, in_elem);
 
         // The second argument must be a pointer matching the element type
-        let pointer_ty = arg_tys[1];
+        let pointer_ty = args[1].layout.ty;
 
         // The last argument is a passthrough vector providing values for disabled lanes
-        let values_ty = arg_tys[2];
+        let values_ty = args[2].layout.ty;
         let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
 
         require_simd!(ret_ty, SimdReturn);
@@ -1861,10 +1837,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let (mask_len, mask_elem) = (in_len, in_elem);
 
         // The second argument must be a pointer matching the element type
-        let pointer_ty = arg_tys[1];
+        let pointer_ty = args[1].layout.ty;
 
         // The last argument specifies the values to store to memory
-        let values_ty = arg_tys[2];
+        let values_ty = args[2].layout.ty;
         let (values_len, values_elem) = require_simd!(values_ty, SimdThird);
 
         // Of the same length:
@@ -1944,8 +1920,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = require_simd!(in_ty, SimdFirst);
-        let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond);
-        let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird);
+        let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond);
+        let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird);
 
         // Of the same length:
         require!(
@@ -1955,7 +1931,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[1],
+                arg_ty: args[1].layout.ty,
                 out_len: element_len1
             }
         );
@@ -1966,7 +1942,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 name,
                 in_len,
                 in_ty,
-                arg_ty: arg_tys[2],
+                arg_ty: args[2].layout.ty,
                 out_len: element_len2
             }
         );
@@ -1981,7 +1957,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 span,
                 name,
                 expected_element: element_ty1,
-                second_arg: arg_tys[1],
+                second_arg: args[1].layout.ty,
                 in_elem,
                 in_ty,
                 mutability: ExpectedPointerMutability::Mut,
@@ -2503,7 +2479,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let ptrs = args[0].immediate();
         // The second argument must be a ptr-sized integer.
         // (We don't care about the signedness, this is wrapping anyway.)
-        let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx());
+        let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx());
         if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) {
             span_bug!(
                 span,
@@ -2527,8 +2503,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 return_error!(InvalidMonomorphization::ExpectedVectorElementType {
                     span,
                     name,
-                    expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1,
-                    vector_type: arg_tys[0]
+                    expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1,
+                    vector_type: args[0].layout.ty
                 });
             }
         };
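
Note: the common thread of the intrinsic.rs hunks is that instead of re-deriving `arg_tys`/`ret_ty` from the callee's fn signature, the code now reads types off the operands themselves (`args[i].layout.ty`, `result.layout.ty`), which is also what lets the `callee_ty` parameter drop out of `generic_simd_intrinsic`. A toy model of that shape (stand-in types, not rustc's real `OperandRef`/`TyAndLayout`):

    #[derive(Clone, Copy)]
    struct TyAndLayout<'a> {
        ty: &'a str,
    }

    struct OperandRef<'a> {
        layout: TyAndLayout<'a>,
    }

    // Each operand already carries its monomorphized layout, so a separate
    // signature-derived type list is redundant plumbing.
    fn describe(args: &[OperandRef<'_>], result: &OperandRef<'_>) -> String {
        let ins: Vec<&str> = args.iter().map(|a| a.layout.ty).collect();
        format!("({}) -> {}", ins.join(", "), result.layout.ty)
    }

    fn main() {
        let i32_ty = TyAndLayout { ty: "i32" };
        let args = [OperandRef { layout: i32_ty }, OperandRef { layout: i32_ty }];
        let ret = OperandRef { layout: i32_ty };
        assert_eq!(describe(&args, &ret), "(i32, i32) -> i32");
    }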
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 8f57f0983ab..9718c95f38a 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -282,6 +282,14 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
         }
         // Filter out features that are not supported by the current LLVM version
         ("riscv32" | "riscv64", "zacas") if get_version().0 < 20 => None,
+        (
+            "s390x",
+            "message-security-assist-extension12"
+            | "concurrent-functions"
+            | "miscellaneous-extensions-4"
+            | "vector-enhancements-3"
+            | "vector-packed-decimal-enhancement-3",
+        ) if get_version().0 < 20 => None,
         // Enable the evex512 target feature if an avx512 target feature is enabled.
         ("x86", s) if s.starts_with("avx512") => Some(LLVMFeature::with_dependencies(
             s,
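
Note: the llvm_util.rs hunk extends the existing pattern of filtering out target features that the linked LLVM is too old to know about. A toy sketch of the gating (the feature names and version cutoff mirror this diff; the function itself is illustrative, not the real to_llvm_features):

    fn is_feature_known(arch: &str, feature: &str, llvm_major: u32) -> bool {
        match (arch, feature) {
            ("riscv32" | "riscv64", "zacas") if llvm_major < 20 => false,
            ("s390x", "vector-enhancements-3" | "concurrent-functions")
                if llvm_major < 20 => false,
            // the real table also renames features and attaches
            // dependencies; everything else passes through here
            _ => true,
        }
    }

    fn main() {
        assert!(!is_feature_known("s390x", "vector-enhancements-3", 19));
        assert!(is_feature_known("s390x", "vector-enhancements-3", 20));
    }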
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 8eedb5392b5..236568590be 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -40,6 +40,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     align: Align,
     slot_size: Align,
     allow_higher_align: bool,
+    force_right_adjust: bool,
 ) -> (&'ll Value, Align) {
     let va_list_ty = bx.type_ptr();
     let va_list_addr = list.immediate();
@@ -57,7 +58,10 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     let next = bx.inbounds_ptradd(addr, full_direct_size);
     bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
-    if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
+    if size.bytes() < slot_size.bytes()
+        && bx.tcx().sess.target.endian == Endian::Big
+        && force_right_adjust
+    {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
         let adjusted = bx.inbounds_ptradd(addr, adjusted_size);
         (adjusted, addr_align)
@@ -81,6 +85,11 @@ enum AllowHigherAlign {
     Yes,
 }
 
+enum ForceRightAdjust {
+    No,
+    Yes,
+}
+
 fn emit_ptr_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
@@ -88,9 +97,11 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     pass_mode: PassMode,
     slot_size: SlotSize,
     allow_higher_align: AllowHigherAlign,
+    force_right_adjust: ForceRightAdjust,
 ) -> &'ll Value {
     let indirect = matches!(pass_mode, PassMode::Indirect);
     let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes);
+    let force_right_adjust = matches!(force_right_adjust, ForceRightAdjust::Yes);
     let slot_size = Align::from_bytes(slot_size as u64).unwrap();
 
     let layout = bx.cx.layout_of(target_ty);
@@ -103,8 +114,15 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
     } else {
         (layout.llvm_type(bx.cx), layout.size, layout.align)
     };
-    let (addr, addr_align) =
-        emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
+    let (addr, addr_align) = emit_direct_ptr_va_arg(
+        bx,
+        list,
+        size,
+        align.abi,
+        slot_size,
+        allow_higher_align,
+        force_right_adjust,
+    );
     if indirect {
         let tmp_ret = bx.load(llty, addr, addr_align);
         bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
@@ -208,6 +226,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
         PassMode::Direct,
         SlotSize::Bytes8,
         AllowHigherAlign::Yes,
+        ForceRightAdjust::No,
     );
     bx.br(end);
 
@@ -218,6 +237,150 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     val
 }
 
+fn emit_powerpc_va_arg<'ll, 'tcx>(
+    bx: &mut Builder<'_, 'll, 'tcx>,
+    list: OperandRef<'tcx, &'ll Value>,
+    target_ty: Ty<'tcx>,
+) -> &'ll Value {
+    let dl = bx.cx.data_layout();
+
+    // struct __va_list_tag {
+    //   unsigned char gpr;
+    //   unsigned char fpr;
+    //   unsigned short reserved;
+    //   void *overflow_arg_area;
+    //   void *reg_save_area;
+    // };
+    let va_list_addr = list.immediate();
+
+    // Peel off any newtype wrappers.
+    let layout = {
+        let mut layout = bx.cx.layout_of(target_ty);
+
+        while let Some((_, inner)) = layout.non_1zst_field(bx.cx) {
+            layout = inner;
+        }
+
+        layout
+    };
+
+    // Rust does not currently support any powerpc softfloat targets.
+    let target = &bx.cx.tcx.sess.target;
+    let is_soft_float_abi = target.abi == "softfloat";
+    assert!(!is_soft_float_abi);
+
+    // All instances of VaArgSafe are passed directly.
+    let is_indirect = false;
+
+    let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
+            rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false),
+            rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64),
+            rustc_abi::Primitive::Pointer(_) => (false, true, false),
+        },
+        _ => unreachable!("all instances of VaArgSafe are represented as scalars"),
+    };
+
+    let num_regs_addr = if is_int || is_soft_float_abi {
+        va_list_addr // gpr
+    } else {
+        bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr
+    };
+
+    let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi);
+
+    // "Align" the register count when the type is passed as `i64`.
+    if is_i64 || (is_f64 && is_soft_float_abi) {
+        num_regs = bx.add(num_regs, bx.const_u8(1));
+        num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110));
+    }
+
+    let max_regs = 8u8;
+    let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs));
+
+    let in_reg = bx.append_sibling_block("va_arg.in_reg");
+    let in_mem = bx.append_sibling_block("va_arg.in_mem");
+    let end = bx.append_sibling_block("va_arg.end");
+
+    bx.cond_br(use_regs, in_reg, in_mem);
+
+    let reg_addr = {
+        bx.switch_to_block(in_reg);
+
+        let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4));
+        let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi);
+
+        // Floating-point registers start after the general-purpose registers.
+        if !is_int && !is_soft_float_abi {
+            reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32))
+        }
+
+        // Get the address of the saved value by scaling the number of
+        // registers we've used by the register size.
+        let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 };
+        let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size));
+        let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset);
+
+        // Increase the used-register count.
+        let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 };
+        let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr));
+        bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi);
+
+        bx.br(end);
+
+        reg_addr
+    };
+
+    let mem_addr = {
+        bx.switch_to_block(in_mem);
+
+        bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi);
+
+        // Everything in the overflow area is rounded up to a size of at least 4.
+        let overflow_area_align = Align::from_bytes(4).unwrap();
+
+        let size = if !is_indirect {
+            layout.layout.size.align_to(overflow_area_align)
+        } else {
+            dl.pointer_size
+        };
+
+        let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2));
+        let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi);
+
+        // Round the argument address up to its alignment.
+        if layout.layout.align.abi > overflow_area_align {
+            overflow_area = round_pointer_up_to_alignment(
+                bx,
+                overflow_area,
+                layout.layout.align.abi,
+                bx.type_ptr(),
+            );
+        }
+
+        let mem_addr = overflow_area;
+
+        // Increase the overflow area.
+        overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes()));
+        bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi);
+
+        bx.br(end);
+
+        mem_addr
+    };
+
+    // Return the appropriate result.
+    bx.switch_to_block(end);
+    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_type = layout.llvm_type(bx);
+    let val_addr = if is_indirect {
+        bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi)
+    } else {
+        val_addr
+    };
+    bx.load(val_type, val_addr, layout.align.abi)
+}
+
 fn emit_s390x_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
@@ -728,6 +891,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
             PassMode::Direct,
             SlotSize::Bytes4,
             if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+            ForceRightAdjust::No,
         ),
         "aarch64" | "arm64ec" if target.is_like_windows || target.is_like_darwin => {
             emit_ptr_va_arg(
@@ -737,10 +901,24 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
                 PassMode::Direct,
                 SlotSize::Bytes8,
                 if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes },
+                ForceRightAdjust::No,
             )
         }
         "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty),
         "s390x" => emit_s390x_va_arg(bx, addr, target_ty),
+        "powerpc" => emit_powerpc_va_arg(bx, addr, target_ty),
+        "powerpc64" | "powerpc64le" => emit_ptr_va_arg(
+            bx,
+            addr,
+            target_ty,
+            PassMode::Direct,
+            SlotSize::Bytes8,
+            AllowHigherAlign::Yes,
+            match &*target.arch {
+                "powerpc64" => ForceRightAdjust::Yes,
+                _ => ForceRightAdjust::No,
+            },
+        ),
         // Windows x86_64
         "x86_64" if target.is_like_windows => {
             let target_ty_size = bx.cx.size_of(target_ty).bytes();
@@ -755,6 +933,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>(
                 },
                 SlotSize::Bytes8,
                 AllowHigherAlign::No,
+                ForceRightAdjust::No,
             )
         }
         // This includes `target.is_like_darwin`, which on x86_64 targets is like sysv64.
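
Note: most of the va_arg.rs growth is `emit_powerpc_va_arg`, which walks the PPC32 SVR4 `__va_list_tag` record field by field. Below is a host-independent Rust mirror of that layout, with `u32` standing in for the target's 32-bit pointers so the offsets hold on any host (a sketch for orientation, matching the `1 + 1 + 2` and `1 + 1 + 2 + 4` ptradd offsets in the diff):

    #[allow(dead_code)]
    #[repr(C)]
    struct VaListTag {
        gpr: u8,                // offset 0: GPRs consumed so far (slot size 4, max 8)
        fpr: u8,                // offset 1: FPRs consumed so far (slot size 8, max 8)
        reserved: u16,          // offset 2: padding
        overflow_arg_area: u32, // offset 4: 32-bit pointer to the on-stack overflow area
        reg_save_area: u32,     // offset 8: 32-bit pointer to saved registers;
                                //           FPR slots begin 32 bytes into the area
    }

    fn main() {
        // The constants emit_powerpc_va_arg hard-codes as ptradd offsets.
        assert_eq!(std::mem::offset_of!(VaListTag, overflow_arg_area), 4);
        assert_eq!(std::mem::offset_of!(VaListTag, reg_save_area), 8);
    }

The even-alignment dance on num_regs in the diff (add 1, mask with 0b1111_1110) reflects that 8-byte values occupy an aligned register pair in this ABI.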