Diffstat (limited to 'compiler/rustc_codegen_gcc/src')
-rw-r--r--  compiler/rustc_codegen_gcc/src/abi.rs             |  47
-rw-r--r--  compiler/rustc_codegen_gcc/src/asm.rs             | 291
-rw-r--r--  compiler/rustc_codegen_gcc/src/back/write.rs      |  29
-rw-r--r--  compiler/rustc_codegen_gcc/src/base.rs            |   6
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs         |  57
-rw-r--r--  compiler/rustc_codegen_gcc/src/common.rs          |  11
-rw-r--r--  compiler/rustc_codegen_gcc/src/consts.rs          |   8
-rw-r--r--  compiler/rustc_codegen_gcc/src/context.rs         |  41
-rw-r--r--  compiler/rustc_codegen_gcc/src/debuginfo.rs       |  17
-rw-r--r--  compiler/rustc_codegen_gcc/src/declare.rs         |  25
-rw-r--r--  compiler/rustc_codegen_gcc/src/gcc_util.rs        |  17
-rw-r--r--  compiler/rustc_codegen_gcc/src/int.rs             |  17
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs  | 287
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/mod.rs   |   1
-rw-r--r--  compiler/rustc_codegen_gcc/src/intrinsic/simd.rs  |  35
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs             |  28
-rw-r--r--  compiler/rustc_codegen_gcc/src/type_of.rs         |  14
17 files changed, 635 insertions, 296 deletions
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
index 9fe6baa3d25..a96b18e01c0 100644
--- a/compiler/rustc_codegen_gcc/src/abi.rs
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -9,6 +9,8 @@ use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::LayoutOf;
 #[cfg(feature = "master")]
 use rustc_session::config;
+#[cfg(feature = "master")]
+use rustc_target::callconv::Conv;
 use rustc_target::callconv::{ArgAttributes, CastTarget, FnAbi, PassMode};
 
 use crate::builder::Builder;
@@ -105,6 +107,8 @@ pub trait FnAbiGccExt<'gcc, 'tcx> {
     // TODO(antoyo): return a function pointer type instead?
     fn gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> FnAbiGcc<'gcc>;
     fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
+    #[cfg(feature = "master")]
+    fn gcc_cconv(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Option<FnAttribute<'gcc>>;
 }
 
 impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
@@ -227,4 +231,47 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         );
         pointer_type
     }
+
+    #[cfg(feature = "master")]
+    fn gcc_cconv(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Option<FnAttribute<'gcc>> {
+        conv_to_fn_attribute(self.conv, &cx.tcx.sess.target.arch)
+    }
+}
+
+#[cfg(feature = "master")]
+pub fn conv_to_fn_attribute<'gcc>(conv: Conv, arch: &str) -> Option<FnAttribute<'gcc>> {
+    // TODO: handle the calling conventions returning None.
+    let attribute = match conv {
+        Conv::C
+        | Conv::Rust
+        | Conv::CCmseNonSecureCall
+        | Conv::CCmseNonSecureEntry
+        | Conv::RiscvInterrupt { .. } => return None,
+        Conv::Cold => return None,
+        Conv::PreserveMost => return None,
+        Conv::PreserveAll => return None,
+        Conv::GpuKernel => {
+            // TODO(antoyo): remove clippy allow attribute when this is implemented.
+            #[allow(clippy::if_same_then_else)]
+            if arch == "amdgpu" {
+                return None;
+            } else if arch == "nvptx64" {
+                return None;
+            } else {
+                panic!("Architecture {} does not support GpuKernel calling convention", arch);
+            }
+        }
+        Conv::AvrInterrupt => return None,
+        Conv::AvrNonBlockingInterrupt => return None,
+        Conv::ArmAapcs => return None,
+        Conv::Msp430Intr => return None,
+        Conv::X86Fastcall => return None,
+        Conv::X86Intr => return None,
+        Conv::X86Stdcall => return None,
+        Conv::X86ThisCall => return None,
+        Conv::X86VectorCall => return None,
+        Conv::X86_64SysV => FnAttribute::SysvAbi,
+        Conv::X86_64Win64 => FnAttribute::MsAbi,
+    };
+    Some(attribute)
 }
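
The new conv_to_fn_attribute only maps the two x86-64 ABIs to GCC function
attributes for now. A minimal usage sketch (illustrative, not part of the
patch; it assumes the module's existing imports, the "master" feature, and a
hypothetical helper name):

    #[cfg(feature = "master")]
    fn attribute_for_win64<'gcc>() -> Option<FnAttribute<'gcc>> {
        // `Conv::X86_64Win64` maps to GCC's `ms_abi` attribute on x86_64;
        // most other calling conventions currently fall through to `None`.
        conv_to_fn_attribute(Conv::X86_64Win64, "x86_64")
    }
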
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 415f8affab9..c35337ae7ce 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -36,7 +36,8 @@ use crate::type_of::LayoutGccExt;
 //
 // 3. Clobbers. GCC has a separate list of clobbers, and clobbers don't have indexes.
 //    Contrary, Rust expresses clobbers through "out" operands that aren't tied to
-//    a variable (`_`),  and such "clobbers" do have index.
+//    a variable (`_`), and such "clobbers" do have an index. An input operand
+//    cannot also be a clobber.
 //
 // 4. Furthermore, GCC Extended Asm does not support explicit register constraints
 //    (like `out("eax")`) directly, offering so-called "local register variables"
@@ -161,6 +162,16 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         // Also, we don't emit any asm operands immediately; we save them to
         // the one of the buffers to be emitted later.
 
+        let mut input_registers = vec![];
+
+        for op in rust_operands {
+            if let InlineAsmOperandRef::In { reg, .. } = *op
+                && let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg)
+            {
+                input_registers.push(reg_name);
+            }
+        }
+
         // 1. Normal variables (and saving operands to buffers).
         for (rust_idx, op) in rust_operands.iter().enumerate() {
             match *op {
@@ -183,25 +194,39 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                             continue;
                         }
                         (Register(reg_name), None) => {
-                            // `clobber_abi` can add lots of clobbers that are not supported by the target,
-                            // such as AVX-512 registers, so we just ignore unsupported registers
-                            let is_target_supported =
-                                reg.reg_class().supported_types(asm_arch, true).iter().any(
-                                    |&(_, feature)| {
-                                        if let Some(feature) = feature {
-                                            self.tcx
-                                                .asm_target_features(instance.def_id())
-                                                .contains(&feature)
-                                        } else {
-                                            true // Register class is unconditionally supported
-                                        }
-                                    },
-                                );
-
-                            if is_target_supported && !clobbers.contains(&reg_name) {
-                                clobbers.push(reg_name);
+                            if input_registers.contains(&reg_name) {
+                                // the `clobber_abi` operand is converted into a series of
+                                // `lateout("reg") _` operands. Of course, a user could also
+                                // explicitly define such an output operand.
+                                //
+                                // GCC does not allow input registers to be clobbered, so if this out register
+                                // is also used as an in register, do not add it to the clobbers list.
+                                // It will be treated as a lateout register with `out_place: None`.
+                                if !late {
+                                    bug!("input registers can only be used as lateout registers");
+                                }
+                                ("r", dummy_output_type(self.cx, reg.reg_class()))
+                            } else {
+                                // `clobber_abi` can add lots of clobbers that are not supported by the target,
+                                // such as AVX-512 registers, so we just ignore unsupported registers
+                                let is_target_supported =
+                                    reg.reg_class().supported_types(asm_arch, true).iter().any(
+                                        |&(_, feature)| {
+                                            if let Some(feature) = feature {
+                                                self.tcx
+                                                    .asm_target_features(instance.def_id())
+                                                    .contains(&feature)
+                                            } else {
+                                                true // Register class is unconditionally supported
+                                            }
+                                        },
+                                    );
+
+                                if is_target_supported && !clobbers.contains(&reg_name) {
+                                    clobbers.push(reg_name);
+                                }
+                                continue;
                             }
-                            continue;
                         }
                     };
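
A sketch of the Rust-side situation this new branch handles (illustrative, not
taken from the patch; x86_64 assumed): `clobber_abi("C")` expands into a series
of `lateout(...) _` operands for the caller-saved registers, so a register that
is also an explicit input must not end up in GCC's clobber list:

    let value: u64 = 42;
    unsafe {
        // `clobber_abi("C")` marks rdi (among others) as `lateout(...) _`;
        // because rdi is also an input here, the GCC backend keeps it as a
        // dummy lateout operand instead of adding it to the clobber list.
        core::arch::asm!(
            "nop",
            in("rdi") value,
            clobber_abi("C"),
        );
    }
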
 
@@ -230,13 +255,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 }
 
                 InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
-                    let constraint =
-                        if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
-                            constraint
-                        } else {
-                            // left for the next pass
-                            continue;
-                        };
+                    let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) else {
+                        // left for the next pass
+                        continue;
+                    };
 
                     // Rustc frontend guarantees that input and output types are "compatible",
                     // so we can just use input var's type for the output variable.
@@ -589,114 +611,127 @@ fn estimate_template_length(
 }
 
 /// Converts a register class to a GCC constraint code.
-fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
-    let constraint = match reg {
-        // For vector registers LLVM wants the register name to match the type size.
+fn reg_to_gcc(reg_or_reg_class: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
+    match reg_or_reg_class {
         InlineAsmRegOrRegClass::Reg(reg) => {
-            match reg {
-                InlineAsmReg::X86(_) => {
-                    // TODO(antoyo): add support for vector register.
-                    //
-                    // // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
-                    return ConstraintOrRegister::Register(match reg.name() {
-                        // Some of registers' names does not map 1-1 from rust to gcc
-                        "st(0)" => "st",
+            ConstraintOrRegister::Register(explicit_reg_to_gcc(reg))
+        }
+        InlineAsmRegOrRegClass::RegClass(reg_class) => {
+            ConstraintOrRegister::Constraint(reg_class_to_gcc(reg_class))
+        }
+    }
+}
 
-                        name => name,
-                    });
+fn explicit_reg_to_gcc(reg: InlineAsmReg) -> &'static str {
+    // For explicit registers, we have to create a register variable: https://stackoverflow.com/a/31774784/389119
+    match reg {
+        InlineAsmReg::X86(reg) => {
+            // TODO(antoyo): add support for vector register.
+            match reg.reg_class() {
+                X86InlineAsmRegClass::reg_byte => {
+                    // GCC does not support the `b` suffix, so we just strip it
+                    // see https://github.com/rust-lang/rustc_codegen_gcc/issues/485
+                    reg.name().trim_end_matches('b')
                 }
+                _ => match reg.name() {
+                    // Some registers' names do not map 1-1 from Rust to GCC.
+                    "st(0)" => "st",
 
-                _ => unimplemented!(),
+                    name => name,
+                },
             }
         }
-        // They can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
-        InlineAsmRegOrRegClass::RegClass(reg) => match reg {
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
-            InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
-            | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
-            InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
-            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
-            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::preg) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
-            InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
-            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
-            InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
-            // https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for
-            // "define_constraint".
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
-            InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
-
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
-            InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
-            | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
-            | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
-            InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
-            InlineAsmRegClass::X86(
-                X86InlineAsmRegClass::kreg0
-                | X86InlineAsmRegClass::x87_reg
-                | X86InlineAsmRegClass::mmx_reg
-                | X86InlineAsmRegClass::tmm_reg,
-            ) => unreachable!("clobber-only"),
-            InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
-                bug!("GCC backend does not support SPIR-V")
-            }
-            InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::vreg) => "v",
-            InlineAsmRegClass::S390x(S390xInlineAsmRegClass::areg) => {
-                unreachable!("clobber-only")
-            }
-            InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => "r",
-            InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
-            InlineAsmRegClass::Err => unreachable!(),
-        },
-    };
 
-    ConstraintOrRegister::Constraint(constraint)
+        _ => unimplemented!(),
+    }
+}
+
+/// The GCC constraint codes can be retrieved from <https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html>.
+fn reg_class_to_gcc(reg_class: InlineAsmRegClass) -> &'static str {
+    match reg_class {
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
+        InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
+        | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "t",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
+        InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::preg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
+        InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
+        InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
+        // https://github.com/gcc-mirror/gcc/blob/master/gcc/config/nvptx/nvptx.md -> look for
+        // "define_constraint".
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
+        InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
+
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::vreg) => "v",
+        InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
+        | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
+        | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
+        InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "Yk",
+        InlineAsmRegClass::X86(
+            X86InlineAsmRegClass::kreg0
+            | X86InlineAsmRegClass::x87_reg
+            | X86InlineAsmRegClass::mmx_reg
+            | X86InlineAsmRegClass::tmm_reg,
+        ) => unreachable!("clobber-only"),
+        InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
+            bug!("GCC backend does not support SPIR-V")
+        }
+        InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::vreg) => "v",
+        InlineAsmRegClass::S390x(S390xInlineAsmRegClass::areg) => {
+            unreachable!("clobber-only")
+        }
+        InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::Sparc(SparcInlineAsmRegClass::yreg) => unreachable!("clobber-only"),
+        InlineAsmRegClass::Err => unreachable!(),
+    }
 }
 
 /// Type to use for outputs that are discarded. It doesn't really matter what
@@ -794,7 +829,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
 
 impl<'gcc, 'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn codegen_global_asm(
-        &self,
+        &mut self,
         template: &[InlineAsmTemplatePiece],
         operands: &[GlobalAsmOperandRef<'tcx>],
         options: InlineAsmOptions,
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
index 51c5ba73e32..16c895322e8 100644
--- a/compiler/rustc_codegen_gcc/src/back/write.rs
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -24,19 +24,23 @@ pub(crate) unsafe fn codegen(
     {
         let context = &module.module_llvm.context;
 
-        let module_name = module.name.clone();
-
         let should_combine_object_files = module.module_llvm.should_combine_object_files;
 
-        let module_name = Some(&module_name[..]);
-
         // NOTE: Only generate object files with GIMPLE when this environment variable is set for
         // now because this requires a particular setup (same gcc/lto1/lto-wrapper commit as libgccjit).
         // TODO(antoyo): remove this environment variable.
         let fat_lto = env::var("EMBED_LTO_BITCODE").as_deref() == Ok("1");
 
-        let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name);
-        let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name);
+        let bc_out = cgcx.output_filenames.temp_path_for_cgu(
+            OutputType::Bitcode,
+            &module.name,
+            cgcx.invocation_temp.as_deref(),
+        );
+        let obj_out = cgcx.output_filenames.temp_path_for_cgu(
+            OutputType::Object,
+            &module.name,
+            cgcx.invocation_temp.as_deref(),
+        );
 
         if config.bitcode_needed() {
             if fat_lto {
@@ -117,14 +121,22 @@ pub(crate) unsafe fn codegen(
         }
 
         if config.emit_ir {
-            let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+            let out = cgcx.output_filenames.temp_path_for_cgu(
+                OutputType::LlvmAssembly,
+                &module.name,
+                cgcx.invocation_temp.as_deref(),
+            );
             std::fs::write(out, "").expect("write file");
         }
 
         if config.emit_asm {
             let _timer =
                 cgcx.prof.generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name);
-            let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
+            let path = cgcx.output_filenames.temp_path_for_cgu(
+                OutputType::Assembly,
+                &module.name,
+                cgcx.invocation_temp.as_deref(),
+            );
             context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
         }
 
@@ -238,6 +250,7 @@ pub(crate) unsafe fn codegen(
         config.emit_asm,
         config.emit_ir,
         &cgcx.output_filenames,
+        cgcx.invocation_temp.as_deref(),
     ))
 }
 
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
index 9b495174a3f..a9d7808c833 100644
--- a/compiler/rustc_codegen_gcc/src/base.rs
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -206,7 +206,7 @@ pub fn compile_codegen_unit(
             let f128_type_supported = target_info.supports_target_dependent_type(CType::Float128);
             let u128_type_supported = target_info.supports_target_dependent_type(CType::UInt128t);
             // TODO: improve this to avoid passing that many arguments.
-            let cx = CodegenCx::new(
+            let mut cx = CodegenCx::new(
                 &context,
                 cgu,
                 tcx,
@@ -223,8 +223,8 @@ pub fn compile_codegen_unit(
             }
 
             // ... and now that we have everything pre-defined, fill out those definitions.
-            for &(mono_item, _) in &mono_items {
-                mono_item.define::<Builder<'_, '_, '_>>(&cx);
+            for &(mono_item, item_data) in &mono_items {
+                mono_item.define::<Builder<'_, '_, '_>>(&mut cx, item_data);
             }
 
             // If this codegen unit contains the main function, also create the
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 6573b5b165e..6720f6186d1 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -45,7 +45,7 @@ enum ExtremumOperation {
     Min,
 }
 
-pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
+pub struct Builder<'a, 'gcc, 'tcx> {
     pub cx: &'a CodegenCx<'gcc, 'tcx>,
     pub block: Block<'gcc>,
     pub location: Option<Location<'gcc>>,
@@ -368,16 +368,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let previous_arg_count = args.len();
         let orig_args = args;
         let args = {
-            let function_address_names = self.function_address_names.borrow();
-            let original_function_name = function_address_names.get(&func_ptr);
             func_ptr = llvm::adjust_function(self.context, &func_name, func_ptr, args);
-            llvm::adjust_intrinsic_arguments(
-                self,
-                gcc_func,
-                args.into(),
-                &func_name,
-                original_function_name,
-            )
+            llvm::adjust_intrinsic_arguments(self, gcc_func, args.into(), &func_name)
         };
         let args_adjusted = args.len() != previous_arg_count;
         let args = self.check_ptr_call("call", func_ptr, &args);
@@ -1271,7 +1263,50 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
-        self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
+        // LLVM has a concept of "unordered compares", where e.g. ULT returns true if the two
+        // arguments are unordered (i.e. either is NaN) or the lhs is less than the rhs. GCC does
+        // not natively have this concept, so in some cases we must handle NaNs manually.
+        let must_handle_nan = match op {
+            RealPredicate::RealPredicateFalse => unreachable!(),
+            RealPredicate::RealOEQ => false,
+            RealPredicate::RealOGT => false,
+            RealPredicate::RealOGE => false,
+            RealPredicate::RealOLT => false,
+            RealPredicate::RealOLE => false,
+            RealPredicate::RealONE => false,
+            RealPredicate::RealORD => unreachable!(),
+            RealPredicate::RealUNO => unreachable!(),
+            RealPredicate::RealUEQ => false,
+            RealPredicate::RealUGT => true,
+            RealPredicate::RealUGE => true,
+            RealPredicate::RealULT => true,
+            RealPredicate::RealULE => true,
+            RealPredicate::RealUNE => false,
+            RealPredicate::RealPredicateTrue => unreachable!(),
+        };
+
+        let cmp = self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs);
+
+        if must_handle_nan {
+            let is_nan = self.context.new_binary_op(
+                self.location,
+                BinaryOp::LogicalOr,
+                self.cx.bool_type,
+                // compare a value to itself to check whether it is NaN
+                self.context.new_comparison(self.location, ComparisonOp::NotEquals, lhs, lhs),
+                self.context.new_comparison(self.location, ComparisonOp::NotEquals, rhs, rhs),
+            );
+
+            self.context.new_binary_op(
+                self.location,
+                BinaryOp::LogicalOr,
+                self.cx.bool_type,
+                is_nan,
+                cmp,
+            )
+        } else {
+            cmp
+        }
     }
 
     /* Miscellaneous instructions */
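
A plain-Rust sketch of the lowering above (illustrative only): for an
unordered predicate such as ULT, the result must also be true when either
operand is NaN, which a single GCC comparison cannot express:

    fn unordered_less_than(lhs: f64, rhs: f64) -> bool {
        // NaN is the only value that compares unequal to itself, which is
        // exactly the `is_nan` check the builder emits above.
        let either_nan = lhs != lhs || rhs != rhs;
        either_nan || lhs < rhs
    }
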
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index a63da6b6e27..918195364ff 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -33,12 +33,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     }
 
     pub fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
-        if value.get_type() == self.bool_type.make_pointer() {
-            if let Some(pointee) = typ.get_pointee() {
-                if pointee.dyncast_vector().is_some() {
-                    panic!()
-                }
-            }
+        if value.get_type() == self.bool_type.make_pointer()
+            && let Some(pointee) = typ.get_pointee()
+            && pointee.dyncast_vector().is_some()
+        {
+            panic!()
         }
         // NOTE: since bitcast makes a value non-constant, don't bitcast if not necessary as some
         // SIMD builtins require a constant value.
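
This hunk, like several below in consts.rs, debuginfo.rs and gcc_util.rs,
replaces nested `if` blocks with let-chains. A minimal standalone sketch of
the pattern (assumes an edition where let-chains are stable):

    fn first_even(values: Option<&[i32]>) -> Option<i32> {
        // `if let` bindings and boolean conditions can be chained with `&&`,
        // avoiding one level of nesting per condition.
        if let Some(vals) = values
            && let Some(&first) = vals.first()
            && first % 2 == 0
        {
            return Some(first);
        }
        None
    }
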
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
index acb39374628..0a67bd7bc71 100644
--- a/compiler/rustc_codegen_gcc/src/consts.rs
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -242,10 +242,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
 
         let global = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
-            if let Some(global) = self.get_declared_value(sym) {
-                if self.val_ty(global) != self.type_ptr_to(gcc_type) {
-                    span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
-                }
+            if let Some(global) = self.get_declared_value(sym)
+                && self.val_ty(global) != self.type_ptr_to(gcc_type)
+            {
+                span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
             }
 
             let is_tls = fn_attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 1e1f577bb3a..73718994e64 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -23,6 +23,8 @@ use rustc_target::spec::{
     HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, Target, TlsModel, WasmCAbi, X86Abi,
 };
 
+#[cfg(feature = "master")]
+use crate::abi::conv_to_fn_attribute;
 use crate::callee::get_fn;
 use crate::common::SignType;
 
@@ -213,33 +215,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let bool_type = context.new_type::<bool>();
 
         let mut functions = FxHashMap::default();
-        let builtins = [
-            "__builtin_unreachable",
-            "abort",
-            "__builtin_expect", /*"__builtin_expect_with_probability",*/
-            "__builtin_constant_p",
-            "__builtin_add_overflow",
-            "__builtin_mul_overflow",
-            "__builtin_saddll_overflow",
-            /*"__builtin_sadd_overflow",*/
-            "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
-            "__builtin_ssubll_overflow",
-            /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow",
-            "__builtin_uaddll_overflow",
-            "__builtin_uadd_overflow",
-            "__builtin_umulll_overflow",
-            "__builtin_umul_overflow",
-            "__builtin_usubll_overflow",
-            "__builtin_usub_overflow",
-            "__builtin_powif",
-            "__builtin_powi",
-            "fabsf",
-            "fabs",
-            "copysignf",
-            "copysign",
-            "nearbyintf",
-            "nearbyint",
-        ];
+        let builtins = ["abort"];
 
         for builtin in builtins.iter() {
             functions.insert(builtin.to_string(), context.get_builtin_function(builtin));
@@ -509,7 +485,11 @@ impl<'gcc, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> {
         let entry_name = self.sess().target.entry_name.as_ref();
         if !self.functions.borrow().contains_key(entry_name) {
-            Some(self.declare_entry_fn(entry_name, fn_type, ()))
+            #[cfg(feature = "master")]
+            let conv = conv_to_fn_attribute(self.sess().target.entry_abi, &self.sess().target.arch);
+            #[cfg(not(feature = "master"))]
+            let conv = None;
+            Some(self.declare_entry_fn(entry_name, fn_type, conv))
         } else {
             // If the symbol already exists, it is an error: for example, the user wrote
             // #[no_mangle] extern "C" fn main(..) {..}
@@ -605,7 +585,10 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
         let mut name = String::with_capacity(prefix.len() + 6);
         name.push_str(prefix);
         name.push('.');
-        name.push_str(&(idx as u64).to_base(ALPHANUMERIC_ONLY));
+        // Offset the index by the base so that at least two characters are
+        // always generated. This avoids cases where the suffix would be
+        // interpreted as a size suffix by the assembler (for m68k: .b, .w, .l).
+        name.push_str(&(idx as u64 + ALPHANUMERIC_ONLY as u64).to_base(ALPHANUMERIC_ONLY));
         name
     }
 }
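
A rough sketch of the suffix scheme described in the comment above, assuming
ALPHANUMERIC_ONLY is a base-62 alphabet (the exact digit order used by
rustc_data_structures::base_n may differ):

    fn suffix(idx: u64) -> String {
        const BASE: u64 = 62;
        const DIGITS: &[u8] = b"0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
        // Offsetting by the base guarantees at least two digits, so a name
        // can never end in a bare `.b`, `.w` or `.l`, which the m68k
        // assembler would parse as an operation size.
        let mut n = idx + BASE;
        let mut digits = Vec::new();
        while n > 0 {
            digits.push(DIGITS[(n % BASE) as usize]);
            n /= BASE;
        }
        digits.reverse();
        String::from_utf8(digits).unwrap()
    }
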
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
index 55e01687400..f3ced864395 100644
--- a/compiler/rustc_codegen_gcc/src/debuginfo.rs
+++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs
@@ -126,14 +126,15 @@ fn make_mir_scope<'gcc, 'tcx>(
         return;
     };
 
-    if let Some(ref vars) = *variables {
-        if !vars.contains(scope) && scope_data.inlined.is_none() {
-            // Do not create a DIScope if there are no variables defined in this
-            // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
-            debug_context.scopes[scope] = parent_scope;
-            instantiated.insert(scope);
-            return;
-        }
+    if let Some(ref vars) = *variables
+        && !vars.contains(scope)
+        && scope_data.inlined.is_none()
+    {
+        // Do not create a DIScope if there are no variables defined in this
+        // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
+        debug_context.scopes[scope] = parent_scope;
+        instantiated.insert(scope);
+        return;
     }
 
     let loc = cx.lookup_debug_loc(scope_data.span.lo());
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
index 7cdbe3c0c62..c1ca3eb849e 100644
--- a/compiler/rustc_codegen_gcc/src/declare.rs
+++ b/compiler/rustc_codegen_gcc/src/declare.rs
@@ -58,7 +58,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         variadic: bool,
     ) -> Function<'gcc> {
         self.linkage.set(FunctionType::Extern);
-        declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic)
+        declare_raw_fn(self, name, None, return_type, params, variadic)
     }
 
     pub fn declare_global(
@@ -92,7 +92,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         &self,
         name: &str,
         _fn_type: Type<'gcc>,
-        callconv: (), /*llvm::CCallConv*/
+        #[cfg(feature = "master")] callconv: Option<FnAttribute<'gcc>>,
+        #[cfg(not(feature = "master"))] callconv: Option<()>,
     ) -> RValue<'gcc> {
         // TODO(antoyo): use the fn_type parameter.
         let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
@@ -123,14 +124,11 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             #[cfg(feature = "master")]
             fn_attributes,
         } = fn_abi.gcc_type(self);
-        let func = declare_raw_fn(
-            self,
-            name,
-            (), /*fn_abi.llvm_cconv()*/
-            return_type,
-            &arguments_type,
-            is_c_variadic,
-        );
+        #[cfg(feature = "master")]
+        let conv = fn_abi.gcc_cconv(self);
+        #[cfg(not(feature = "master"))]
+        let conv = None;
+        let func = declare_raw_fn(self, name, conv, return_type, &arguments_type, is_c_variadic);
         self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
         #[cfg(feature = "master")]
         for fn_attr in fn_attributes {
@@ -162,7 +160,8 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 fn declare_raw_fn<'gcc>(
     cx: &CodegenCx<'gcc, '_>,
     name: &str,
-    _callconv: (), /*llvm::CallConv*/
+    #[cfg(feature = "master")] callconv: Option<FnAttribute<'gcc>>,
+    #[cfg(not(feature = "master"))] _callconv: Option<()>,
     return_type: Type<'gcc>,
     param_types: &[Type<'gcc>],
     variadic: bool,
@@ -192,6 +191,10 @@ fn declare_raw_fn<'gcc>(
         let name = &mangle_name(name);
         let func =
             cx.context.new_function(None, cx.linkage.get(), return_type, &params, name, variadic);
+        #[cfg(feature = "master")]
+        if let Some(attribute) = callconv {
+            func.add_attribute(attribute);
+        }
         cx.functions.borrow_mut().insert(name.to_string(), func);
 
         #[cfg(feature = "master")]
diff --git a/compiler/rustc_codegen_gcc/src/gcc_util.rs b/compiler/rustc_codegen_gcc/src/gcc_util.rs
index 6eae0c24f48..2b053abdd19 100644
--- a/compiler/rustc_codegen_gcc/src/gcc_util.rs
+++ b/compiler/rustc_codegen_gcc/src/gcc_util.rs
@@ -55,7 +55,7 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
             )
         } else if let Some(feature) = feature.strip_prefix('-') {
             // FIXME: Why do we not remove implied features on "-" here?
-            // We do the equivalent above in `target_features_cfg`.
+            // We do the equivalent above in `target_config`.
             // See <https://github.com/rust-lang/rust/issues/134792>.
             all_rust_features.push((false, feature));
         } else if !feature.is_empty() && diagnostics {
@@ -136,14 +136,12 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
         });
     features.extend(feats);
 
-    if diagnostics {
-        if let Some(f) = check_tied_features(sess, &featsmap) {
-            sess.dcx().emit_err(TargetFeatureDisableOrEnable {
-                features: f,
-                span: None,
-                missing_features: None,
-            });
-        }
+    if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) {
+        sess.dcx().emit_err(TargetFeatureDisableOrEnable {
+            features: f,
+            span: None,
+            missing_features: None,
+        });
     }
 
     features
@@ -194,6 +192,7 @@ pub fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]>
 
 fn arch_to_gcc(name: &str) -> &str {
     match name {
+        "M68000" => "68000",
         "M68020" => "68020",
         _ => name,
     }
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
index f3552d9b12f..9b5b0fde6e2 100644
--- a/compiler/rustc_codegen_gcc/src/int.rs
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -404,7 +404,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         let ret_indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
 
-        let result = if ret_indirect {
+        let call = if ret_indirect {
             let res_value = self.current_func().new_local(self.location, res_type, "result_value");
             let res_addr = res_value.get_address(self.location);
             let res_param_type = res_type.make_pointer();
@@ -432,8 +432,17 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             );
             self.context.new_call(self.location, func, &[lhs, rhs, overflow_addr])
         };
-
-        (result, self.context.new_cast(self.location, overflow_value, self.bool_type).to_rvalue())
+        // NOTE: we must assign the result of the operation to a variable at this point to make
+        // sure it will be evaluated by libgccjit now.
+        // Otherwise, it will only be evaluated when the rvalue for the call is used somewhere else
+        // and overflow_value will not be initialized at the correct point in the program.
+        let result = self.current_func().new_local(self.location, res_type, "result");
+        self.block.add_assignment(self.location, result, call);
+
+        (
+            result.to_rvalue(),
+            self.context.new_cast(self.location, overflow_value, self.bool_type).to_rvalue(),
+        )
     }
 
     pub fn gcc_icmp(
@@ -865,6 +874,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let value_type = value.get_type();
         if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type)
         {
+            // TODO: use self.location.
             self.context.new_cast(None, value, dest_typ)
         } else if self.is_native_int_type_or_bool(dest_typ) {
             self.context.new_cast(None, self.low(value), dest_typ)
@@ -905,6 +915,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let name_suffix = match self.type_kind(dest_typ) {
             TypeKind::Float => "tisf",
             TypeKind::Double => "tidf",
+            TypeKind::FP128 => "tixf",
             kind => panic!("cannot cast a non-native integer to type {:?}", kind),
         };
         let sign = if signed { "" } else { "un" };
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
index 2d731f88d7d..0eebd21001a 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
@@ -1,11 +1,90 @@
 use std::borrow::Cow;
 
-use gccjit::{CType, Context, Function, FunctionPtrType, RValue, ToRValue, UnaryOp};
+use gccjit::{CType, Context, Field, Function, FunctionPtrType, RValue, ToRValue, Type};
 use rustc_codegen_ssa::traits::BuilderMethods;
 
 use crate::builder::Builder;
 use crate::context::CodegenCx;
 
+fn encode_key_128_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u32_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let field3 = builder.context.new_field(None, m128i, "field3");
+    let field4 = builder.context.new_field(None, m128i, "field4");
+    let field5 = builder.context.new_field(None, m128i, "field5");
+    let field6 = builder.context.new_field(None, m128i, "field6");
+    let field7 = builder.context.new_field(None, m128i, "field7");
+    let encode_type = builder.context.new_struct_type(
+        None,
+        "EncodeKey128Output",
+        &[field1, field2, field3, field4, field5, field6, field7],
+    );
+    #[cfg(feature = "master")]
+    encode_type.as_type().set_packed();
+    (encode_type.as_type(), field1, field2)
+}
+
+fn encode_key_256_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u32_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let field3 = builder.context.new_field(None, m128i, "field3");
+    let field4 = builder.context.new_field(None, m128i, "field4");
+    let field5 = builder.context.new_field(None, m128i, "field5");
+    let field6 = builder.context.new_field(None, m128i, "field6");
+    let field7 = builder.context.new_field(None, m128i, "field7");
+    let field8 = builder.context.new_field(None, m128i, "field8");
+    let encode_type = builder.context.new_struct_type(
+        None,
+        "EncodeKey256Output",
+        &[field1, field2, field3, field4, field5, field6, field7, field8],
+    );
+    #[cfg(feature = "master")]
+    encode_type.as_type().set_packed();
+    (encode_type.as_type(), field1, field2)
+}
+
+fn aes_output_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u8_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let aes_output_type = builder.context.new_struct_type(None, "AesOutput", &[field1, field2]);
+    let typ = aes_output_type.as_type();
+    #[cfg(feature = "master")]
+    typ.set_packed();
+    (typ, field1, field2)
+}
+
+fn wide_aes_output_type<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+) -> (Type<'gcc>, Field<'gcc>, Field<'gcc>) {
+    let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+    let field1 = builder.context.new_field(None, builder.u8_type, "field1");
+    let field2 = builder.context.new_field(None, m128i, "field2");
+    let field3 = builder.context.new_field(None, m128i, "field3");
+    let field4 = builder.context.new_field(None, m128i, "field4");
+    let field5 = builder.context.new_field(None, m128i, "field5");
+    let field6 = builder.context.new_field(None, m128i, "field6");
+    let field7 = builder.context.new_field(None, m128i, "field7");
+    let field8 = builder.context.new_field(None, m128i, "field8");
+    let field9 = builder.context.new_field(None, m128i, "field9");
+    let aes_output_type = builder.context.new_struct_type(
+        None,
+        "WideAesOutput",
+        &[field1, field2, field3, field4, field5, field6, field7, field8, field9],
+    );
+    #[cfg(feature = "master")]
+    aes_output_type.as_type().set_packed();
+    (aes_output_type.as_type(), field1, field2)
+}
+
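
For orientation: the struct types built by the helpers above mirror the
aggregate values returned by the corresponding LLVM key-locker intrinsics. A
hypothetical Rust-side picture of the 128-bit variant, using `[i64; 2]` as the
stand-in for `__m128i` exactly as the code above does (field names are
illustrative, not a public API):

    #[repr(C, packed)]
    struct EncodeKey128Output {
        // Matches `encode_key_128_type`: a 32-bit status word followed by
        // six 128-bit vector fields, packed so the layout has no padding.
        field1: u32,
        field2: [i64; 2],
        field3: [i64; 2],
        field4: [i64; 2],
        field5: [i64; 2],
        field6: [i64; 2],
        field7: [i64; 2],
    }
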
 #[cfg_attr(not(feature = "master"), allow(unused_variables))]
 pub fn adjust_function<'gcc>(
     context: &'gcc Context<'gcc>,
@@ -43,7 +122,6 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
     gcc_func: FunctionPtrType<'gcc>,
     mut args: Cow<'b, [RValue<'gcc>]>,
     func_name: &str,
-    original_function_name: Option<&String>,
 ) -> Cow<'b, [RValue<'gcc>]> {
     // TODO: this might not be a good way to workaround the missing tile builtins.
     if func_name == "__builtin_trap" {
@@ -504,6 +582,72 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
                 let arg4 = builder.context.new_rvalue_from_int(arg4_type, -1);
                 args = vec![a, b, c, arg4, new_args[3]].into();
             }
+            "__builtin_ia32_encodekey128_u32" => {
+                let mut new_args = args.to_vec();
+                let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+                let array_type = builder.context.new_array_type(None, m128i, 6);
+                let result = builder.current_func().new_local(None, array_type, "result");
+                new_args.push(result.get_address(None));
+                args = new_args.into();
+            }
+            "__builtin_ia32_encodekey256_u32" => {
+                let mut new_args = args.to_vec();
+                let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+                let array_type = builder.context.new_array_type(None, m128i, 7);
+                let result = builder.current_func().new_local(None, array_type, "result");
+                new_args.push(result.get_address(None));
+                args = new_args.into();
+            }
+            "__builtin_ia32_aesenc128kl_u8"
+            | "__builtin_ia32_aesdec128kl_u8"
+            | "__builtin_ia32_aesenc256kl_u8"
+            | "__builtin_ia32_aesdec256kl_u8" => {
+                let mut new_args = vec![];
+                let m128i = builder.context.new_vector_type(builder.i64_type, 2);
+                let result = builder.current_func().new_local(None, m128i, "result");
+                new_args.push(result.get_address(None));
+                new_args.extend(args.to_vec());
+                args = new_args.into();
+            }
+            "__builtin_ia32_aesencwide128kl_u8"
+            | "__builtin_ia32_aesdecwide128kl_u8"
+            | "__builtin_ia32_aesencwide256kl_u8"
+            | "__builtin_ia32_aesdecwide256kl_u8" => {
+                let mut new_args = vec![];
+
+                let mut old_args = args.to_vec();
+                let handle = old_args.swap_remove(0); // Called __P in GCC.
+                let first_value = old_args.swap_remove(0);
+
+                let element_type = first_value.get_type();
+                let array_type = builder.context.new_array_type(None, element_type, 8);
+                let result = builder.current_func().new_local(None, array_type, "result");
+                new_args.push(result.get_address(None));
+
+                let array = builder.current_func().new_local(None, array_type, "array");
+                let input = builder.context.new_array_constructor(
+                    None,
+                    array_type,
+                    &[
+                        first_value,
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                        old_args.swap_remove(0),
+                    ],
+                );
+                builder.llbb().add_assignment(None, array, input);
+                let input_ptr = array.get_address(None);
+                let arg2_type = gcc_func.get_param_type(1);
+                let input_ptr = builder.context.new_cast(None, input_ptr, arg2_type);
+                new_args.push(input_ptr);
+
+                new_args.push(handle);
+                args = new_args.into();
+            }
             _ => (),
         }
     } else {
@@ -541,33 +685,6 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
                 let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]);
                 args = vec![a, b, c, new_args[3]].into();
             }
-            "__builtin_ia32_vfmaddsubpd256"
-            | "__builtin_ia32_vfmaddsubps"
-            | "__builtin_ia32_vfmaddsubps256"
-            | "__builtin_ia32_vfmaddsubpd" => {
-                if let Some(original_function_name) = original_function_name {
-                    match &**original_function_name {
-                        "llvm.x86.fma.vfmsubadd.pd.256"
-                        | "llvm.x86.fma.vfmsubadd.ps"
-                        | "llvm.x86.fma.vfmsubadd.ps.256"
-                        | "llvm.x86.fma.vfmsubadd.pd" => {
-                            // NOTE: since both llvm.x86.fma.vfmsubadd.ps and llvm.x86.fma.vfmaddsub.ps maps to
-                            // __builtin_ia32_vfmaddsubps, only add minus if this comes from a
-                            // subadd LLVM intrinsic, e.g. _mm256_fmsubadd_pd.
-                            let mut new_args = args.to_vec();
-                            let arg3 = &mut new_args[2];
-                            *arg3 = builder.context.new_unary_op(
-                                None,
-                                UnaryOp::Minus,
-                                arg3.get_type(),
-                                *arg3,
-                            );
-                            args = new_args.into();
-                        }
-                        _ => (),
-                    }
-                }
-            }
             "__builtin_ia32_ldmxcsr" => {
                 // The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer,
                 // so dereference the pointer.
@@ -728,6 +845,96 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
             let f16_type = builder.context.new_c_type(CType::Float16);
             return_value = builder.context.new_cast(None, return_value, f16_type);
         }
+        "__builtin_ia32_encodekey128_u32" => {
+            // The builtin __builtin_ia32_encodekey128_u32 writes the result in its pointer argument while
+            // llvm.x86.encodekey128 returns a value.
+            // We added a result pointer argument and now need to assign its value to the return_value expected by
+            // the LLVM intrinsic.
+            let (encode_type, field1, field2) = encode_key_128_type(builder);
+            let result = builder.current_func().new_local(None, encode_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let field2_type = field2.to_rvalue().get_type();
+            let array_type = builder.context.new_array_type(None, field2_type, 6);
+            let ptr = builder.context.new_cast(None, args[2], array_type.make_pointer());
+            let field2_ptr =
+                builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
+            builder.llbb().add_assignment(
+                None,
+                field2_ptr.dereference(None),
+                ptr.dereference(None),
+            );
+            return_value = result.to_rvalue();
+        }
+        "__builtin_ia32_encodekey256_u32" => {
+            // The builtin __builtin_ia32_encodekey256_u32 writes the result in its pointer argument while
+            // llvm.x86.encodekey256 returns a value.
+            // We added a result pointer argument and now need to assign its value to the return_value expected by
+            // the LLVM intrinsic.
+            let (encode_type, field1, field2) = encode_key_256_type(builder);
+            let result = builder.current_func().new_local(None, encode_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let field2_type = field2.to_rvalue().get_type();
+            let array_type = builder.context.new_array_type(None, field2_type, 7);
+            let ptr = builder.context.new_cast(None, args[3], array_type.make_pointer());
+            let field2_ptr =
+                builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
+            builder.llbb().add_assignment(
+                None,
+                field2_ptr.dereference(None),
+                ptr.dereference(None),
+            );
+            return_value = result.to_rvalue();
+        }
+        "__builtin_ia32_aesdec128kl_u8"
+        | "__builtin_ia32_aesenc128kl_u8"
+        | "__builtin_ia32_aesdec256kl_u8"
+        | "__builtin_ia32_aesenc256kl_u8" => {
+            // The aesdec/aesenc builtins write the processed block through their pointer argument,
+            // while the corresponding LLVM intrinsics (e.g. llvm.x86.aesdec128kl) return it as part of
+            // a value.
+            // A result pointer argument was added when the arguments were adjusted, so read the block
+            // written through it and pack it, together with the builtin's return value, into the
+            // return_value expected by the LLVM intrinsic.
+            let (aes_output_type, field1, field2) = aes_output_type(builder);
+            let result = builder.current_func().new_local(None, aes_output_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let ptr = builder.context.new_cast(
+                None,
+                args[0],
+                field2.to_rvalue().get_type().make_pointer(),
+            );
+            builder.llbb().add_assignment(None, field2, ptr.dereference(None));
+            return_value = result.to_rvalue();
+        }
+        "__builtin_ia32_aesencwide128kl_u8"
+        | "__builtin_ia32_aesdecwide128kl_u8"
+        | "__builtin_ia32_aesencwide256kl_u8"
+        | "__builtin_ia32_aesdecwide256kl_u8" => {
+            // The aesdecwide/aesencwide builtins write the eight processed blocks through their pointer
+            // argument, while the corresponding LLVM intrinsics (e.g. llvm.x86.aesencwide128kl) return
+            // them as part of a value.
+            // A result pointer argument was added when the arguments were adjusted, so read the blocks
+            // written through it and pack them, together with the builtin's return value, into the
+            // return_value expected by the LLVM intrinsic.
+            let (aes_output_type, field1, field2) = wide_aes_output_type(builder);
+            let result = builder.current_func().new_local(None, aes_output_type, "result");
+            let field1 = result.access_field(None, field1);
+            builder.llbb().add_assignment(None, field1, return_value);
+            let field2 = result.access_field(None, field2);
+            let field2_type = field2.to_rvalue().get_type();
+            let array_type = builder.context.new_array_type(None, field2_type, 8);
+            let ptr = builder.context.new_cast(None, args[0], array_type.make_pointer());
+            let field2_ptr =
+                builder.context.new_cast(None, field2.get_address(None), array_type.make_pointer());
+            builder.llbb().add_assignment(
+                None,
+                field2_ptr.dereference(None),
+                ptr.dereference(None),
+            );
+            return_value = result.to_rvalue();
+        }
         _ => (),
     }
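The four cases added above follow one pattern: the GCC builtin returns a scalar and stores the rest of its output through a pointer argument (appended when the call's arguments were adjusted), whereas the LLVM intrinsic returns everything as a single aggregate. A minimal plain-Rust model of that repacking for llvm.x86.encodekey128, with illustrative names and types rather than the gccjit lvalues used above:

    // Shape of the aggregate llvm.x86.encodekey128 is expected to return:
    // the builtin's direct u32 result plus six 128-bit handle blocks.
    struct EncodeKey128Result {
        ret_u32: u32,          // corresponds to field1 above
        handle: [[u8; 16]; 6], // corresponds to field2 above
    }

    // `builtin_ret` is what the builtin returned; `handle_ptr` is the buffer it
    // filled through the extra pointer argument (args[2] above).
    fn repack(builtin_ret: u32, handle_ptr: *const [u8; 16]) -> EncodeKey128Result {
        let mut handle = [[0u8; 16]; 6];
        for (i, block) in handle.iter_mut().enumerate() {
            // Copy each 16-byte block back out of the temporary buffer.
            *block = unsafe { *handle_ptr.add(i) };
        }
        EncodeKey128Result { ret_u32: builtin_ret, handle }
    }

The encodekey256 and AES key-locker cases differ only in how many blocks are copied back (7, 1, and 8 respectively) and in which argument carries the pointer.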
 
@@ -915,16 +1122,6 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
         "llvm.ctlz.v4i64" => "__builtin_ia32_vplzcntq_256_mask",
         "llvm.ctlz.v2i64" => "__builtin_ia32_vplzcntq_128_mask",
         "llvm.ctpop.v32i16" => "__builtin_ia32_vpopcountw_v32hi",
-        "llvm.x86.fma.vfmsub.sd" => "__builtin_ia32_vfmsubsd3",
-        "llvm.x86.fma.vfmsub.ss" => "__builtin_ia32_vfmsubss3",
-        "llvm.x86.fma.vfmsubadd.pd" => "__builtin_ia32_vfmaddsubpd",
-        "llvm.x86.fma.vfmsubadd.pd.256" => "__builtin_ia32_vfmaddsubpd256",
-        "llvm.x86.fma.vfmsubadd.ps" => "__builtin_ia32_vfmaddsubps",
-        "llvm.x86.fma.vfmsubadd.ps.256" => "__builtin_ia32_vfmaddsubps256",
-        "llvm.x86.fma.vfnmadd.sd" => "__builtin_ia32_vfnmaddsd3",
-        "llvm.x86.fma.vfnmadd.ss" => "__builtin_ia32_vfnmaddss3",
-        "llvm.x86.fma.vfnmsub.sd" => "__builtin_ia32_vfnmsubsd3",
-        "llvm.x86.fma.vfnmsub.ss" => "__builtin_ia32_vfnmsubss3",
         "llvm.x86.avx512.conflict.d.512" => "__builtin_ia32_vpconflictsi_512_mask",
         "llvm.x86.avx512.conflict.d.256" => "__builtin_ia32_vpconflictsi_256_mask",
         "llvm.x86.avx512.conflict.d.128" => "__builtin_ia32_vpconflictsi_128_mask",
@@ -1002,8 +1199,6 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
         "llvm.fshr.v32i16" => "__builtin_ia32_vpshrdv_v32hi",
         "llvm.fshr.v16i16" => "__builtin_ia32_vpshrdv_v16hi",
         "llvm.fshr.v8i16" => "__builtin_ia32_vpshrdv_v8hi",
-        "llvm.x86.fma.vfmadd.sd" => "__builtin_ia32_vfmaddsd3",
-        "llvm.x86.fma.vfmadd.ss" => "__builtin_ia32_vfmaddss3",
         "llvm.x86.rdrand.64" => "__builtin_ia32_rdrand64_step",
 
         // The above doc points to unknown builtins for the following, so override them:
@@ -1324,6 +1519,16 @@ pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function
         "llvm.x86.avx512fp16.mask.vfmadd.cph.256" => "__builtin_ia32_vfmaddcph256_mask3",
         "llvm.x86.avx512fp16.mask.vfcmadd.cph.128" => "__builtin_ia32_vfcmaddcph128_mask3",
         "llvm.x86.avx512fp16.mask.vfmadd.cph.128" => "__builtin_ia32_vfmaddcph128_mask3",
+        "llvm.x86.encodekey128" => "__builtin_ia32_encodekey128_u32",
+        "llvm.x86.encodekey256" => "__builtin_ia32_encodekey256_u32",
+        "llvm.x86.aesenc128kl" => "__builtin_ia32_aesenc128kl_u8",
+        "llvm.x86.aesdec128kl" => "__builtin_ia32_aesdec128kl_u8",
+        "llvm.x86.aesenc256kl" => "__builtin_ia32_aesenc256kl_u8",
+        "llvm.x86.aesdec256kl" => "__builtin_ia32_aesdec256kl_u8",
+        "llvm.x86.aesencwide128kl" => "__builtin_ia32_aesencwide128kl_u8",
+        "llvm.x86.aesdecwide128kl" => "__builtin_ia32_aesdecwide128kl_u8",
+        "llvm.x86.aesencwide256kl" => "__builtin_ia32_aesencwide256kl_u8",
+        "llvm.x86.aesdecwide256kl" => "__builtin_ia32_aesdecwide256kl_u8",
 
         // TODO: support the tile builtins:
         "llvm.x86.ldtilecfg" => "__builtin_trap",
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index f38622074f1..d22f4229e23 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -78,6 +78,7 @@ fn get_simple_intrinsic<'gcc, 'tcx>(
         sym::maxnumf64 => "fmax",
         sym::copysignf32 => "copysignf",
         sym::copysignf64 => "copysign",
+        sym::copysignf128 => "copysignl",
         sym::floorf32 => "floorf",
         sym::floorf64 => "floor",
         sym::ceilf32 => "ceilf",
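The copysignf128 mapping added above routes Rust's f128 copysign intrinsic to "copysignl", C's long double variant. A nightly-only sketch of code that would reach it (the f128 type and its copysign method are unstable; names are assumptions based on current nightly):

    #![feature(f128)]

    // Copies the sign bit of `sign` onto `magnitude`; with this backend the call is
    // lowered through the simple-intrinsic table to the copysignl builtin named above.
    fn copy_sign(magnitude: f128, sign: f128) -> f128 {
        magnitude.copysign(sign)
    }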
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index 8b454ab2a42..b897d079249 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -399,7 +399,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
     }
 
     #[cfg(feature = "master")]
-    if name == sym::simd_insert {
+    if name == sym::simd_insert || name == sym::simd_insert_dyn {
         require!(
             in_elem == arg_tys[2],
             InvalidMonomorphization::InsertedType {
@@ -410,6 +410,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
                 out_ty: arg_tys[2]
             }
         );
+
+        // TODO(antoyo): For simd_insert, check if the index is a constant of the correct size.
         let vector = args[0].immediate();
         let index = args[1].immediate();
         let value = args[2].immediate();
@@ -422,13 +424,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
     }
 
     #[cfg(feature = "master")]
-    if name == sym::simd_extract {
+    if name == sym::simd_extract || name == sym::simd_extract_dyn {
         require!(
             ret_ty == in_elem,
             InvalidMonomorphization::ReturnType { span, name, in_elem, in_ty, ret_ty }
         );
+        // TODO(antoyo): For simd_extract, check if the index is a constant of the correct size.
         let vector = args[0].immediate();
-        return Ok(bx.context.new_vector_access(None, vector, args[1].immediate()).to_rvalue());
+        let index = args[1].immediate();
+        return Ok(bx.context.new_vector_access(None, vector, index).to_rvalue());
     }
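The _dyn variants accepted above are the runtime-index counterparts of simd_insert/simd_extract, which is why the constant-index check is only a TODO for the non-_dyn names. A hedged usage sketch against the unstable core intrinsics (module path and signatures are assumptions based on current nightly):

    #![feature(core_intrinsics, repr_simd)]
    use core::intrinsics::simd::{simd_extract_dyn, simd_insert_dyn};

    #[repr(simd)]
    #[derive(Copy, Clone)]
    struct I32x4([i32; 4]);

    // `lane` is an ordinary runtime value; simd_insert/simd_extract would require a constant here.
    fn set_lane(v: I32x4, lane: u32, value: i32) -> I32x4 {
        unsafe { simd_insert_dyn(v, lane, value) }
    }

    fn get_lane(v: I32x4, lane: u32) -> i32 {
        unsafe { simd_extract_dyn(v, lane) }
    }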
 
     if name == sym::simd_select {
@@ -443,9 +447,14 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
             m_len == v_len,
             InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len }
         );
+        // TODO: also support unsigned integers.
         match *m_elem_ty.kind() {
             ty::Int(_) => {}
-            _ => return_error!(InvalidMonomorphization::MaskType { span, name, ty: m_elem_ty }),
+            _ => return_error!(InvalidMonomorphization::MaskWrongElementType {
+                span,
+                name,
+                ty: m_elem_ty
+            }),
         }
         return Ok(bx.vector_select(args[0].immediate(), args[1].immediate(), args[2].immediate()));
     }
@@ -987,19 +996,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         assert_eq!(pointer_count - 1, ptr_count(element_ty0));
         assert_eq!(underlying_ty, non_ptr(element_ty0));
 
-        // The element type of the third argument must be a signed integer type of any width:
+        // The element type of the third argument must be an integer type of any width:
+        // TODO: also support unsigned integers.
         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
         match *element_ty2.kind() {
             ty::Int(_) => (),
             _ => {
                 require!(
                     false,
-                    InvalidMonomorphization::ThirdArgElementType {
-                        span,
-                        name,
-                        expected_element: element_ty2,
-                        third_arg: arg_tys[2]
-                    }
+                    InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
                 );
             }
         }
@@ -1105,17 +1110,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         assert_eq!(underlying_ty, non_ptr(element_ty0));
 
         // The element type of the third argument must be a signed integer type of any width:
+        // TODO: also support unsigned integers.
         match *element_ty2.kind() {
             ty::Int(_) => (),
             _ => {
                 require!(
                     false,
-                    InvalidMonomorphization::ThirdArgElementType {
-                        span,
-                        name,
-                        expected_element: element_ty2,
-                        third_arg: arg_tys[2]
-                    }
+                    InvalidMonomorphization::MaskWrongElementType { span, name, ty: element_ty2 }
                 );
             }
         }
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index bfa23174a19..2c5a7871683 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -22,7 +22,7 @@
 #![warn(rust_2018_idioms)]
 #![warn(unused_lifetimes)]
 #![deny(clippy::pattern_type_mismatch)]
-#![allow(clippy::needless_lifetimes)]
+#![allow(clippy::needless_lifetimes, clippy::uninlined_format_args)]
 
 // Some "regular" crates we want to share with rustc
 extern crate object;
@@ -102,7 +102,7 @@ use rustc_codegen_ssa::back::write::{
 };
 use rustc_codegen_ssa::base::codegen_crate;
 use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, WriteBackendMethods};
-use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig};
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::sync::IntoDynSyncSend;
 use rustc_errors::DiagCtxtHandle;
@@ -188,10 +188,10 @@ impl CodegenBackend for GccCodegenBackend {
         crate::DEFAULT_LOCALE_RESOURCE
     }
 
-    fn init(&self, sess: &Session) {
+    fn init(&self, _sess: &Session) {
         #[cfg(feature = "master")]
         {
-            let target_cpu = target_cpu(sess);
+            let target_cpu = target_cpu(_sess);
 
             // Get the second TargetInfo with the correct CPU features by setting the arch.
             let context = Context::default();
@@ -260,8 +260,8 @@ impl CodegenBackend for GccCodegenBackend {
             .join(sess)
     }
 
-    fn target_features_cfg(&self, sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) {
-        target_features_cfg(sess, &self.target_info)
+    fn target_config(&self, sess: &Session) -> TargetConfig {
+        target_config(sess, &self.target_info)
     }
 }
 
@@ -485,10 +485,7 @@ fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
 }
 
 /// Returns the features that should be set in `cfg(target_feature)`.
-fn target_features_cfg(
-    sess: &Session,
-    target_info: &LockedTargetInfo,
-) -> (Vec<Symbol>, Vec<Symbol>) {
+fn target_config(sess: &Session, target_info: &LockedTargetInfo) -> TargetConfig {
     // TODO(antoyo): use global_gcc_features.
     let f = |allow_unstable| {
         sess.target
@@ -523,5 +520,14 @@ fn target_features_cfg(
 
     let target_features = f(false);
     let unstable_target_features = f(true);
-    (target_features, unstable_target_features)
+
+    TargetConfig {
+        target_features,
+        unstable_target_features,
+        // There are no known bugs in GCC's support for f16 or f128, so report both as reliable.
+        has_reliable_f16: true,
+        has_reliable_f16_math: true,
+        has_reliable_f128: true,
+        has_reliable_f128_math: true,
+    }
 }
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index ae98b3d0b56..5745acce6fe 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -102,10 +102,10 @@ fn uncached_gcc_type<'gcc, 'tcx>(
             let mut name = with_no_trimmed_paths!(layout.ty.to_string());
             if let (&ty::Adt(def, _), &Variants::Single { index }) =
                 (layout.ty.kind(), &layout.variants)
+                && def.is_enum()
+                && !def.variants().is_empty()
             {
-                if def.is_enum() && !def.variants().is_empty() {
-                    write!(&mut name, "::{}", def.variant(index).name).unwrap();
-                }
+                write!(&mut name, "::{}", def.variant(index).name).unwrap();
             }
             if let (&ty::Coroutine(_, _), &Variants::Single { index }) =
                 (layout.ty.kind(), &layout.variants)
@@ -264,10 +264,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
-        if let BackendRepr::Scalar(ref scalar) = self.backend_repr {
-            if scalar.is_bool() {
-                return cx.type_i1();
-            }
+        if let BackendRepr::Scalar(ref scalar) = self.backend_repr
+            && scalar.is_bool()
+        {
+            return cx.type_i1();
         }
         self.gcc_type(cx)
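Both type_of.rs hunks above collapse a nested if-let plus inner if into a single let-chain (available in the 2024 edition). A standalone illustration of the same transformation, using a made-up function for clarity:

    // Nested form, as before the change:
    fn first_even_nested(xs: &[i32]) -> Option<i32> {
        if let Some(&x) = xs.first() {
            if x % 2 == 0 {
                return Some(x);
            }
        }
        None
    }

    // Let-chain form, as after the change: the pattern match and the extra
    // condition share one `if`, removing a level of nesting.
    fn first_even_chained(xs: &[i32]) -> Option<i32> {
        if let Some(&x) = xs.first()
            && x % 2 == 0
        {
            return Some(x);
        }
        None
    }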
     }