Diffstat (limited to 'compiler/rustc_codegen_gcc/src')
 compiler/rustc_codegen_gcc/src/abi.rs            |  107
 compiler/rustc_codegen_gcc/src/allocator.rs      |   62
 compiler/rustc_codegen_gcc/src/asm.rs            |  223
 compiler/rustc_codegen_gcc/src/attributes.rs     |   77
 compiler/rustc_codegen_gcc/src/back/lto.rs       |   94
 compiler/rustc_codegen_gcc/src/back/write.rs     |   54
 compiler/rustc_codegen_gcc/src/base.rs           |   59
 compiler/rustc_codegen_gcc/src/builder.rs        | 1431
 compiler/rustc_codegen_gcc/src/callee.rs         |  259
 compiler/rustc_codegen_gcc/src/common.rs         |  168
 compiler/rustc_codegen_gcc/src/consts.rs         |  143
 compiler/rustc_codegen_gcc/src/context.rs        |  252
 compiler/rustc_codegen_gcc/src/debuginfo.rs      |  243
 compiler/rustc_codegen_gcc/src/declare.rs        |  225
 compiler/rustc_codegen_gcc/src/errors.rs         |   23
 compiler/rustc_codegen_gcc/src/gcc_util.rs       |   67
 compiler/rustc_codegen_gcc/src/int.rs            |  942
 compiler/rustc_codegen_gcc/src/intrinsic/archs.rs |  27
 compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs |  524
 compiler/rustc_codegen_gcc/src/intrinsic/mod.rs  | 1101
 compiler/rustc_codegen_gcc/src/intrinsic/simd.rs |  230
 compiler/rustc_codegen_gcc/src/lib.rs            |  240
 compiler/rustc_codegen_gcc/src/mono_item.rs      |   39
 compiler/rustc_codegen_gcc/src/type_.rs          |   71
 compiler/rustc_codegen_gcc/src/type_of.rs        |  163
 25 files changed, 4175 insertions(+), 2649 deletions(-)
diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs
index f601cd95f2a..b098594dbcc 100644
--- a/compiler/rustc_codegen_gcc/src/abi.rs
+++ b/compiler/rustc_codegen_gcc/src/abi.rs
@@ -18,17 +18,16 @@ impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn get_param(&mut self, index: usize) -> Self::Value {
         let func = self.current_func();
         let param = func.get_param(index as i32);
-        let on_stack =
-            if let Some(on_stack_param_indices) = self.on_stack_function_params.borrow().get(&func) {
-                on_stack_param_indices.contains(&index)
-            }
-            else {
-                false
-            };
+        let on_stack = if let Some(on_stack_param_indices) =
+            self.on_stack_function_params.borrow().get(&func)
+        {
+            on_stack_param_indices.contains(&index)
+        } else {
+            false
+        };
         if on_stack {
             param.to_lvalue().get_address(None)
-        }
-        else {
+        } else {
             param.to_rvalue()
         }
     }
@@ -37,13 +36,14 @@ impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 impl GccType for CastTarget {
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
         let rest_gcc_unit = self.rest.unit.gcc_type(cx);
-        let (rest_count, rem_bytes) =
-            if self.rest.unit.size.bytes() == 0 {
-                (0, 0)
-            }
-            else {
-                (self.rest.total.bytes() / self.rest.unit.size.bytes(), self.rest.total.bytes() % self.rest.unit.size.bytes())
-            };
+        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
+            (0, 0)
+        } else {
+            (
+                self.rest.total.bytes() / self.rest.unit.size.bytes(),
+                self.rest.total.bytes() % self.rest.unit.size.bytes(),
+            )
+        };
 
         if self.prefix.iter().all(|x| x.is_none()) {
             // Simplify to a single unit when there is no prefix and size <= unit size
@@ -61,9 +61,7 @@ impl GccType for CastTarget {
         let mut args: Vec<_> = self
             .prefix
             .iter()
-            .flat_map(|option_reg| {
-                option_reg.map(|reg| reg.gcc_type(cx))
-            })
+            .flat_map(|option_reg| option_reg.map(|reg| reg.gcc_type(cx)))
             .chain((0..rest_count).map(|_| rest_gcc_unit))
             .collect();
 
@@ -86,12 +84,10 @@ impl GccType for Reg {
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, '_>) -> Type<'gcc> {
         match self.kind {
             RegKind::Integer => cx.type_ix(self.size.bits()),
-            RegKind::Float => {
-                match self.size.bits() {
-                    32 => cx.type_f32(),
-                    64 => cx.type_f64(),
-                    _ => bug!("unsupported float: {:?}", self),
-                }
+            RegKind::Float => match self.size.bits() {
+                32 => cx.type_f32(),
+                64 => cx.type_f64(),
+                _ => bug!("unsupported float: {:?}", self),
             },
             RegKind::Vector => unimplemented!(), //cx.type_vector(cx.type_i8(), self.size.bytes()),
         }
@@ -119,19 +115,18 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
 
         // This capacity calculation is approximate.
         let mut argument_tys = Vec::with_capacity(
-            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 }
+            self.args.len() + if let PassMode::Indirect { .. } = self.ret.mode { 1 } else { 0 },
         );
 
-        let return_type =
-            match self.ret.mode {
-                PassMode::Ignore => cx.type_void(),
-                PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
-                PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
-                PassMode::Indirect { .. } => {
-                    argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
-                    cx.type_void()
-                }
-            };
+        let return_type = match self.ret.mode {
+            PassMode::Ignore => cx.type_void(),
+            PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
+            PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
+            PassMode::Indirect { .. } => {
+                argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                cx.type_void()
+            }
+        };
         #[cfg(feature = "master")]
         let mut non_null_args = Vec::new();
 
@@ -149,17 +144,23 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             ty
         };
         #[cfg(not(feature = "master"))]
-        let apply_attrs = |ty: Type<'gcc>, _attrs: &ArgAttributes, _arg_index: usize| {
-            ty
-        };
+        let apply_attrs = |ty: Type<'gcc>, _attrs: &ArgAttributes, _arg_index: usize| ty;
 
         for arg in self.args.iter() {
             let arg_ty = match arg.mode {
                 PassMode::Ignore => continue,
                 PassMode::Pair(a, b) => {
                     let arg_pos = argument_tys.len();
-                    argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 0), &a, arg_pos));
-                    argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 1), &b, arg_pos + 1));
+                    argument_tys.push(apply_attrs(
+                        arg.layout.scalar_pair_element_gcc_type(cx, 0),
+                        &a,
+                        arg_pos,
+                    ));
+                    argument_tys.push(apply_attrs(
+                        arg.layout.scalar_pair_element_gcc_type(cx, 1),
+                        &b,
+                        arg_pos + 1,
+                    ));
                     continue;
                 }
                 PassMode::Cast { ref cast, pad_i32 } => {
@@ -174,14 +175,17 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     // This is a "byval" argument, so we don't apply the `restrict` attribute on it.
                     on_stack_param_indices.insert(argument_tys.len());
                     arg.memory_ty(cx)
-                },
-                PassMode::Direct(attrs) => apply_attrs(arg.layout.immediate_gcc_type(cx), &attrs, argument_tys.len()),
+                }
+                PassMode::Direct(attrs) => {
+                    apply_attrs(arg.layout.immediate_gcc_type(cx), &attrs, argument_tys.len())
+                }
                 PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                     apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len())
                 }
                 PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                     assert!(!on_stack);
-                    let ty = apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len());
+                    let ty =
+                        apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs, argument_tys.len());
                     apply_attrs(ty, &meta_attrs, argument_tys.len())
                 }
             };
@@ -207,15 +211,14 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
 
     fn ptr_to_gcc_type(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
         // FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
-        let FnAbiGcc {
-            return_type,
-            arguments_type,
-            is_c_variadic,
+        let FnAbiGcc { return_type, arguments_type, is_c_variadic, on_stack_param_indices, .. } =
+            self.gcc_type(cx);
+        let pointer_type =
+            cx.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic);
+        cx.on_stack_params.borrow_mut().insert(
+            pointer_type.dyncast_function_ptr_type().expect("function ptr type"),
             on_stack_param_indices,
-            ..
-        } = self.gcc_type(cx);
-        let pointer_type = cx.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic);
-        cx.on_stack_params.borrow_mut().insert(pointer_type.dyncast_function_ptr_type().expect("function ptr type"), on_stack_param_indices);
+        );
         pointer_type
     }
 }
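
Note on the (rest_count, rem_bytes) computation reformatted above: it splits the tail of a cast target into whole register-sized units plus a byte remainder. A minimal standalone Rust sketch of that arithmetic, assuming `total` and `unit` stand for self.rest.total.bytes() and self.rest.unit.size.bytes(); the helper name is hypothetical:

    // Hypothetical helper mirroring CastTarget::gcc_type: how many whole
    // `unit`-sized pieces fit into `total` bytes, and what remains.
    fn split_rest(total: u64, unit: u64) -> (u64, u64) {
        if unit == 0 {
            (0, 0) // a zero-sized unit contributes nothing
        } else {
            (total / unit, total % unit)
        }
    }

    fn main() {
        // 20 bytes passed through 8-byte units: two full units, 4 bytes left.
        assert_eq!(split_rest(20, 8), (2, 4));
    }
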
diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs b/compiler/rustc_codegen_gcc/src/allocator.rs
index 7c7044830f3..deeb55e9d12 100644
--- a/compiler/rustc_codegen_gcc/src/allocator.rs
+++ b/compiler/rustc_codegen_gcc/src/allocator.rs
@@ -1,4 +1,4 @@
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::FnAttribute;
 use gccjit::{Context, FunctionType, GlobalKind, ToRValue, Type};
 use rustc_ast::expand::allocator::{
@@ -11,15 +11,20 @@ use rustc_session::config::OomStrategy;
 
 use crate::GccContext;
 
-pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) {
+pub(crate) unsafe fn codegen(
+    tcx: TyCtxt<'_>,
+    mods: &mut GccContext,
+    _module_name: &str,
+    kind: AllocatorKind,
+    alloc_error_handler_kind: AllocatorKind,
+) {
     let context = &mods.context;
-    let usize =
-        match tcx.sess.target.pointer_width {
-            16 => context.new_type::<u16>(),
-            32 => context.new_type::<u32>(),
-            64 => context.new_type::<u64>(),
-            tws => bug!("Unsupported target word size for int: {}", tws),
-        };
+    let usize = match tcx.sess.target.pointer_width {
+        16 => context.new_type::<u16>(),
+        32 => context.new_type::<u32>(),
+        64 => context.new_type::<u64>(),
+        tws => bug!("Unsupported target word size for int: {}", tws),
+    };
     let i8 = context.new_type::<i8>();
     let i8p = i8.make_pointer();
 
@@ -58,7 +63,7 @@ pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut GccContext, _module_nam
         tcx,
         context,
         "__rust_alloc_error_handler",
-        &alloc_error_handler_name(alloc_error_handler_kind),
+        alloc_error_handler_name(alloc_error_handler_kind),
         &[usize, usize],
         None,
     );
@@ -85,24 +90,42 @@ fn create_wrapper_function(
 ) {
     let void = context.new_type::<()>();
 
-    let args: Vec<_> = types.iter().enumerate()
-        .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+    let args: Vec<_> = types
+        .iter()
+        .enumerate()
+        .map(|(index, typ)| context.new_parameter(None, *typ, format!("param{}", index)))
         .collect();
-    let func = context.new_function(None, FunctionType::Exported, output.unwrap_or(void), &args, from_name, false);
+    let func = context.new_function(
+        None,
+        FunctionType::Exported,
+        output.unwrap_or(void),
+        &args,
+        from_name,
+        false,
+    );
 
     if tcx.sess.default_hidden_visibility() {
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
     }
     if tcx.sess.must_emit_unwind_tables() {
         // TODO(antoyo): emit unwind tables.
     }
 
-    let args: Vec<_> = types.iter().enumerate()
-        .map(|(index, typ)| context.new_parameter(None, *typ, &format!("param{}", index)))
+    let args: Vec<_> = types
+        .iter()
+        .enumerate()
+        .map(|(index, typ)| context.new_parameter(None, *typ, format!("param{}", index)))
         .collect();
-    let callee = context.new_function(None, FunctionType::Extern, output.unwrap_or(void), &args, to_name, false);
-    #[cfg(feature="master")]
+    let callee = context.new_function(
+        None,
+        FunctionType::Extern,
+        output.unwrap_or(void),
+        &args,
+        to_name,
+        false,
+    );
+    #[cfg(feature = "master")]
     callee.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
 
     let block = func.new_block("entry");
@@ -116,8 +139,7 @@ fn create_wrapper_function(
     //llvm::LLVMSetTailCall(ret, True);
     if output.is_some() {
         block.end_with_return(None, ret);
-    }
-    else {
+    } else {
         block.end_with_void_return(None);
     }
 
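The usize selection reformatted above maps the target pointer width to a matching GCC integer type. A minimal sketch of the same dispatch without gccjit, with a hypothetical IntType enum standing in for context.new_type::<uN>():

    // Hypothetical stand-in for picking a usize-equivalent type by target
    // pointer width; allocator.rs treats any other width as a compiler bug.
    #[derive(Debug, PartialEq)]
    enum IntType { U16, U32, U64 }

    fn usize_type(pointer_width: u32) -> IntType {
        match pointer_width {
            16 => IntType::U16,
            32 => IntType::U32,
            64 => IntType::U64,
            tws => panic!("Unsupported target word size for int: {}", tws),
        }
    }

    fn main() {
        assert_eq!(usize_type(64), IntType::U64);
    }
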
diff --git a/compiler/rustc_codegen_gcc/src/asm.rs b/compiler/rustc_codegen_gcc/src/asm.rs
index 78e8e32b972..a237f3e6490 100644
--- a/compiler/rustc_codegen_gcc/src/asm.rs
+++ b/compiler/rustc_codegen_gcc/src/asm.rs
@@ -2,7 +2,10 @@ use gccjit::{LValue, RValue, ToRValue, Type};
 use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_codegen_ssa::mir::operand::OperandValue;
 use rustc_codegen_ssa::mir::place::PlaceRef;
-use rustc_codegen_ssa::traits::{AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef, InlineAsmOperandRef};
+use rustc_codegen_ssa::traits::{
+    AsmBuilderMethods, AsmMethods, BaseTypeMethods, BuilderMethods, GlobalAsmOperandRef,
+    InlineAsmOperandRef,
+};
 
 use rustc_middle::{bug, ty::Instance};
 use rustc_span::Span;
@@ -11,11 +14,10 @@ use rustc_target::asm::*;
 use std::borrow::Cow;
 
 use crate::builder::Builder;
+use crate::callee::get_fn;
 use crate::context::CodegenCx;
 use crate::errors::UnwindingInlineAsm;
 use crate::type_of::LayoutGccExt;
-use crate::callee::get_fn;
-
 
 // Rust asm! and GCC Extended Asm semantics differ substantially.
 //
@@ -68,7 +70,6 @@ use crate::callee::get_fn;
 const ATT_SYNTAX_INS: &str = ".att_syntax noprefix\n\t";
 const INTEL_SYNTAX_INS: &str = "\n\t.intel_syntax noprefix";
 
-
 struct AsmOutOperand<'a, 'tcx, 'gcc> {
     rust_idx: usize,
     constraint: &'a str,
@@ -76,13 +77,13 @@ struct AsmOutOperand<'a, 'tcx, 'gcc> {
     readwrite: bool,
 
     tmp_var: LValue<'gcc>,
-    out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>
+    out_place: Option<PlaceRef<'tcx, RValue<'gcc>>>,
 }
 
 struct AsmInOperand<'a, 'tcx> {
     rust_idx: usize,
     constraint: Cow<'a, str>,
-    val: RValue<'tcx>
+    val: RValue<'tcx>,
 }
 
 impl AsmOutOperand<'_, '_, '_> {
@@ -95,23 +96,28 @@ impl AsmOutOperand<'_, '_, '_> {
             res.push('&');
         }
 
-        res.push_str(&self.constraint);
+        res.push_str(self.constraint);
         res
     }
 }
 
 enum ConstraintOrRegister {
     Constraint(&'static str),
-    Register(&'static str)
+    Register(&'static str),
 }
 
-
 impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+    fn codegen_inline_asm(
+        &mut self,
+        template: &[InlineAsmTemplatePiece],
+        rust_operands: &[InlineAsmOperandRef<'tcx, Self>],
+        options: InlineAsmOptions,
+        span: &[Span],
+        instance: Instance<'_>,
+        _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>,
+    ) {
         if options.contains(InlineAsmOptions::MAY_UNWIND) {
-            self.sess().dcx()
-                .create_err(UnwindingInlineAsm { span: span[0] })
-                .emit();
+            self.sess().dcx().create_err(UnwindingInlineAsm { span: span[0] }).emit();
             return;
         }
 
@@ -157,32 +163,40 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     use ConstraintOrRegister::*;
 
                     let (constraint, ty) = match (reg_to_gcc(reg), place) {
-                        (Constraint(constraint), Some(place)) => (constraint, place.layout.gcc_type(self.cx)),
+                        (Constraint(constraint), Some(place)) => {
+                            (constraint, place.layout.gcc_type(self.cx))
+                        }
                         // When `reg` is a class and not an explicit register but the out place is not specified,
                         // we need to create an unused output variable to assign the output to. This var
                         // needs to be of a type that's "compatible" with the register class, but specific type
                         // doesn't matter.
-                        (Constraint(constraint), None) => (constraint, dummy_output_type(self.cx, reg.reg_class())),
+                        (Constraint(constraint), None) => {
+                            (constraint, dummy_output_type(self.cx, reg.reg_class()))
+                        }
                         (Register(_), Some(_)) => {
                             // left for the next pass
-                            continue
-                        },
+                            continue;
+                        }
                         (Register(reg_name), None) => {
                             // `clobber_abi` can add lots of clobbers that are not supported by the target,
                             // such as AVX-512 registers, so we just ignore unsupported registers
-                            let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
-                                .any(|&(_, feature)| {
-                                    if let Some(feature) = feature {
-                                        self.tcx.asm_target_features(instance.def_id()).contains(&feature)
-                                    } else {
-                                        true // Register class is unconditionally supported
-                                    }
-                                });
+                            let is_target_supported =
+                                reg.reg_class().supported_types(asm_arch).iter().any(
+                                    |&(_, feature)| {
+                                        if let Some(feature) = feature {
+                                            self.tcx
+                                                .asm_target_features(instance.def_id())
+                                                .contains(&feature)
+                                        } else {
+                                            true // Register class is unconditionally supported
+                                        }
+                                    },
+                                );
 
                             if is_target_supported && !clobbers.contains(&reg_name) {
                                 clobbers.push(reg_name);
                             }
-                            continue
+                            continue;
                         }
                     };
 
@@ -193,7 +207,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                         late,
                         readwrite: false,
                         tmp_var,
-                        out_place: place
+                        out_place: place,
                     });
                 }
 
@@ -202,23 +216,22 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                         inputs.push(AsmInOperand {
                             constraint: Cow::Borrowed(constraint),
                             rust_idx,
-                            val: value.immediate()
+                            val: value.immediate(),
                         });
-                    }
-                    else {
+                    } else {
                         // left for the next pass
-                        continue
+                        continue;
                     }
                 }
 
                 InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
-                    let constraint = if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
-                        constraint
-                    }
-                    else {
-                        // left for the next pass
-                        continue
-                    };
+                    let constraint =
+                        if let ConstraintOrRegister::Constraint(constraint) = reg_to_gcc(reg) {
+                            constraint
+                        } else {
+                            // left for the next pass
+                            continue;
+                        };
 
                     // Rustc frontend guarantees that input and output types are "compatible",
                     // so we can just use input var's type for the output variable.
@@ -249,7 +262,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                         inputs.push(AsmInOperand {
                             constraint,
                             rust_idx,
-                            val: in_value.immediate()
+                            val: in_value.immediate(),
                         });
                     }
                 }
@@ -267,7 +280,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 InlineAsmOperandRef::SymStatic { def_id } => {
                     // TODO(@Amanieu): Additional mangling is needed on
                     // some targets to add a leading underscore (Mach-O).
-                    constants_len += self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
+                    constants_len +=
+                        self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name.len();
                 }
             }
         }
@@ -280,10 +294,9 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     if let ConstraintOrRegister::Register(reg_name) = reg_to_gcc(reg) {
                         let out_place = if let Some(place) = place {
                             place
-                        }
-                        else {
+                        } else {
                             // processed in the previous pass
-                            continue
+                            continue;
                         };
 
                         let ty = out_place.layout.gcc_type(self.cx);
@@ -291,12 +304,12 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                         tmp_var.set_register_name(reg_name);
 
                         outputs.push(AsmOutOperand {
-                            constraint: "r".into(),
+                            constraint: "r",
                             rust_idx,
                             late,
                             readwrite: false,
                             tmp_var,
-                            out_place: Some(out_place)
+                            out_place: Some(out_place),
                         });
                     }
 
@@ -314,7 +327,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                         inputs.push(AsmInOperand {
                             constraint: "r".into(),
                             rust_idx,
-                            val: reg_var.to_rvalue()
+                            val: reg_var.to_rvalue(),
                         });
                     }
 
@@ -330,7 +343,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                         tmp_var.set_register_name(reg_name);
 
                         outputs.push(AsmOutOperand {
-                            constraint: "r".into(),
+                            constraint: "r",
                             rust_idx,
                             late,
                             readwrite: false,
@@ -342,7 +355,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                         inputs.push(AsmInOperand {
                             constraint,
                             rust_idx,
-                            val: in_value.immediate()
+                            val: in_value.immediate(),
                         });
                     }
 
@@ -373,7 +386,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 
         // 3. Build the template string
 
-        let mut template_str = String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
+        let mut template_str =
+            String::with_capacity(estimate_template_length(template, constants_len, att_dialect));
         if att_dialect {
             template_str.push_str(ATT_SYNTAX_INS);
         }
@@ -383,16 +397,15 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                 InlineAsmTemplatePiece::String(ref string) => {
                     for char in string.chars() {
                         // TODO(antoyo): might also need to escape | if rustc doesn't do it.
-                        let escaped_char =
-                            match char {
-                                '%' => "%%",
-                                '{' => "%{",
-                                '}' => "%}",
-                                _ => {
-                                    template_str.push(char);
-                                    continue;
-                                },
-                            };
+                        let escaped_char = match char {
+                            '%' => "%%",
+                            '{' => "%{",
+                            '}' => "%}",
+                            _ => {
+                                template_str.push(char);
+                                continue;
+                            }
+                        };
                         template_str.push_str(escaped_char);
                     }
                 }
@@ -408,9 +421,10 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                     };
 
                     match rust_operands[operand_idx] {
-                        InlineAsmOperandRef::Out { reg, ..  } => {
+                        InlineAsmOperandRef::Out { reg, .. } => {
                             let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
-                            let gcc_index = outputs.iter()
+                            let gcc_index = outputs
+                                .iter()
                                 .position(|op| operand_idx == op.rust_idx)
                                 .expect("wrong rust index");
                             push_to_template(modifier, gcc_index);
@@ -418,7 +432,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 
                         InlineAsmOperandRef::In { reg, .. } => {
                             let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
-                            let in_gcc_index = inputs.iter()
+                            let in_gcc_index = inputs
+                                .iter()
                                 .position(|op| operand_idx == op.rust_idx)
                                 .expect("wrong rust index");
                             let gcc_index = in_gcc_index + outputs.len();
@@ -429,7 +444,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
                             let modifier = modifier_to_gcc(asm_arch, reg.reg_class(), modifier);
 
                             // The input register is tied to the output, so we can just use the index of the output register
-                            let gcc_index = outputs.iter()
+                            let gcc_index = outputs
+                                .iter()
                                 .position(|op| operand_idx == op.rust_idx)
                                 .expect("wrong rust index");
                             push_to_template(modifier, gcc_index);
@@ -496,7 +512,8 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         }
         if options.contains(InlineAsmOptions::NORETURN) {
             let builtin_unreachable = self.context.get_builtin_function("__builtin_unreachable");
-            let builtin_unreachable: RValue<'gcc> = unsafe { std::mem::transmute(builtin_unreachable) };
+            let builtin_unreachable: RValue<'gcc> =
+                unsafe { std::mem::transmute(builtin_unreachable) };
             self.call(self.type_void(), None, None, builtin_unreachable, &[], None);
         }
 
@@ -517,19 +534,23 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 }
 
-fn estimate_template_length(template: &[InlineAsmTemplatePiece], constants_len: usize, att_dialect: bool) -> usize {
-    let len: usize = template.iter().map(|piece| {
-        match *piece {
-            InlineAsmTemplatePiece::String(ref string) => {
-                string.len()
-            }
-            InlineAsmTemplatePiece::Placeholder { .. } => {
-                // '%' + 1 char modifier + 1 char index
-                3
+fn estimate_template_length(
+    template: &[InlineAsmTemplatePiece],
+    constants_len: usize,
+    att_dialect: bool,
+) -> usize {
+    let len: usize = template
+        .iter()
+        .map(|piece| {
+            match *piece {
+                InlineAsmTemplatePiece::String(ref string) => string.len(),
+                InlineAsmTemplatePiece::Placeholder { .. } => {
+                    // '%' + 1 char modifier + 1 char index
+                    3
+                }
             }
-        }
-    })
-    .sum();
+        })
+        .sum();
 
     // increase it by 5% to account for possible '%' signs that'll be duplicated
     // I pulled the number out of blue, but should be fair enough
@@ -562,7 +583,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
 
                 _ => unimplemented!(),
             }
-        },
+        }
         // They can be retrieved from https://gcc.gnu.org/onlinedocs/gcc/Machine-Constraints.html
         InlineAsmRegOrRegClass::RegClass(reg) => match reg {
             InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
@@ -610,7 +631,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
             InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
             | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
                 unreachable!("clobber-only")
-            },
+            }
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
@@ -637,7 +658,7 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
             InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
             InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
             InlineAsmRegClass::Err => unreachable!(),
-        }
+        },
     };
 
     ConstraintOrRegister::Constraint(constraint)
@@ -653,7 +674,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
         | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
             unimplemented!()
         }
-        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)=> cx.type_i32(),
+        InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
         | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
         InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
@@ -686,7 +707,7 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
         InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
         | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
             unreachable!("clobber-only")
-        },
+        }
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => cx.type_f32(),
@@ -704,9 +725,9 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
         InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
             bug!("LLVM backend does not support SPIR-V")
-        },
+        }
         InlineAsmRegClass::S390x(
-            S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr
+            S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr,
         ) => cx.type_i32(),
         InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
         InlineAsmRegClass::Err => unreachable!(),
@@ -714,7 +735,13 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegCl
 }
 
 impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
-    fn codegen_global_asm(&self, template: &[InlineAsmTemplatePiece], operands: &[GlobalAsmOperandRef<'tcx>], options: InlineAsmOptions, _line_spans: &[Span]) {
+    fn codegen_global_asm(
+        &self,
+        template: &[InlineAsmTemplatePiece],
+        operands: &[GlobalAsmOperandRef<'tcx>],
+        options: InlineAsmOptions,
+        _line_spans: &[Span],
+    ) {
         let asm_arch = self.tcx.sess.asm_arch.unwrap();
 
         // Default to Intel syntax on x86
@@ -732,15 +759,17 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
                     let mut index = 0;
                     while index < string.len() {
                         // NOTE: gcc does not allow inline comment, so remove them.
-                        let comment_index = string[index..].find("//")
+                        let comment_index = string[index..]
+                            .find("//")
                             .map(|comment_index| comment_index + index)
                             .unwrap_or(string.len());
                         template_str.push_str(&string[index..comment_index]);
-                        index = string[comment_index..].find('\n')
+                        index = string[comment_index..]
+                            .find('\n')
                             .map(|index| index + comment_index)
                             .unwrap_or(string.len());
                     }
-                },
+                }
                 InlineAsmTemplatePiece::Placeholder { operand_idx, modifier: _, span: _ } => {
                     match operands[operand_idx] {
                         GlobalAsmOperandRef::Const { ref string } => {
@@ -782,14 +811,22 @@ impl<'gcc, 'tcx> AsmMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     }
 }
 
-fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
+fn modifier_to_gcc(
+    arch: InlineAsmArch,
+    reg: InlineAsmRegClass,
+    modifier: Option<char>,
+) -> Option<char> {
     // The modifiers can be retrieved from
     // https://gcc.gnu.org/onlinedocs/gcc/Modifiers.html#Modifiers
     match reg {
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
         | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
-            if modifier == Some('v') { None } else { modifier }
+            if modifier == Some('v') {
+                None
+            } else {
+                modifier
+            }
         }
         InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
             unreachable!("clobber-only")
@@ -821,7 +858,13 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option
         }
         InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
         | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
-            None => if arch == InlineAsmArch::X86_64 { Some('q') } else { Some('k') },
+            None => {
+                if arch == InlineAsmArch::X86_64 {
+                    Some('q')
+                } else {
+                    Some('k')
+                }
+            }
             Some('l') => Some('b'),
             Some('h') => Some('h'),
             Some('x') => Some('w'),
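
The template loop in this file escapes characters that are meta-characters in GCC extended asm templates ('%', '{', '}'). A minimal sketch of that escaping as a free function; the name is hypothetical, but the mapping is taken verbatim from the hunk above:

    // Escape GCC inline-asm template meta-characters, as in the
    // InlineAsmTemplatePiece::String arm of codegen_inline_asm.
    fn escape_asm_template(input: &str) -> String {
        let mut out = String::with_capacity(input.len());
        for ch in input.chars() {
            match ch {
                '%' => out.push_str("%%"),
                '{' => out.push_str("%{"),
                '}' => out.push_str("%}"),
                _ => out.push(ch),
            }
        }
        out
    }

    fn main() {
        assert_eq!(escape_asm_template("mov %eax, {0}"), "mov %%eax, %{0%}");
    }
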
diff --git a/compiler/rustc_codegen_gcc/src/attributes.rs b/compiler/rustc_codegen_gcc/src/attributes.rs
index 142f86b003d..8602566ab8f 100644
--- a/compiler/rustc_codegen_gcc/src/attributes.rs
+++ b/compiler/rustc_codegen_gcc/src/attributes.rs
@@ -1,21 +1,24 @@
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::FnAttribute;
 use gccjit::Function;
-use rustc_attr::InstructionSetAttr;
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use rustc_attr::InlineAttr;
-use rustc_middle::ty;
-#[cfg(feature="master")]
+use rustc_attr::InstructionSetAttr;
+#[cfg(feature = "master")]
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use rustc_middle::ty;
 use rustc_span::symbol::sym;
 
-use crate::{context::CodegenCx, errors::TiedTargetFeatures};
 use crate::gcc_util::{check_tied_features, to_gcc_features};
+use crate::{context::CodegenCx, errors::TiedTargetFeatures};
 
 /// Get GCC attribute for the provided inline heuristic.
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 #[inline]
-fn inline_attr<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, inline: InlineAttr) -> Option<FnAttribute<'gcc>> {
+fn inline_attr<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    inline: InlineAttr,
+) -> Option<FnAttribute<'gcc>> {
     match inline {
         InlineAttr::Hint => Some(FnAttribute::Inline),
         InlineAttr::Always => Some(FnAttribute::AlwaysInline),
@@ -34,24 +37,22 @@ fn inline_attr<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, inline: InlineAttr) -> Op
 /// attributes.
 pub fn from_fn_attrs<'gcc, 'tcx>(
     cx: &CodegenCx<'gcc, 'tcx>,
-    #[cfg_attr(not(feature="master"), allow(unused_variables))]
-    func: Function<'gcc>,
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))] func: Function<'gcc>,
     instance: ty::Instance<'tcx>,
 ) {
     let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     {
-        let inline =
-            if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
-                InlineAttr::Never
-            }
-            else if codegen_fn_attrs.inline == InlineAttr::None && instance.def.requires_inline(cx.tcx) {
-                InlineAttr::Hint
-            }
-            else {
-                codegen_fn_attrs.inline
-            };
+        let inline = if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
+            InlineAttr::Never
+        } else if codegen_fn_attrs.inline == InlineAttr::None
+            && instance.def.requires_inline(cx.tcx)
+        {
+            InlineAttr::Hint
+        } else {
+            codegen_fn_attrs.inline
+        };
         if let Some(attr) = inline_attr(cx, inline) {
             if let FnAttribute::AlwaysInline = attr {
                 func.add_attribute(FnAttribute::Inline);
@@ -70,18 +71,21 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
         }
     }
 
-    let function_features =
-        codegen_fn_attrs.target_features.iter().map(|features| features.as_str()).collect::<Vec<&str>>();
+    let function_features = codegen_fn_attrs
+        .target_features
+        .iter()
+        .map(|features| features.as_str())
+        .collect::<Vec<&str>>();
 
-    if let Some(features) = check_tied_features(cx.tcx.sess, &function_features.iter().map(|features| (*features, true)).collect()) {
-        let span = cx.tcx
+    if let Some(features) = check_tied_features(
+        cx.tcx.sess,
+        &function_features.iter().map(|features| (*features, true)).collect(),
+    ) {
+        let span = cx
+            .tcx
             .get_attr(instance.def_id(), sym::target_feature)
             .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span);
-        cx.tcx.dcx().create_err(TiedTargetFeatures {
-            features: features.join(", "),
-            span,
-        })
-            .emit();
+        cx.tcx.dcx().create_err(TiedTargetFeatures { features: features.join(", "), span }).emit();
         return;
     }
 
@@ -105,24 +109,25 @@ pub fn from_fn_attrs<'gcc, 'tcx>(
             // compiling Rust for Linux:
             // SSE register return with SSE disabled
             // TODO(antoyo): support soft-float and retpoline-external-thunk.
-            if feature.contains("soft-float") || feature.contains("retpoline-external-thunk") || *feature == "-sse" {
+            if feature.contains("soft-float")
+                || feature.contains("retpoline-external-thunk")
+                || *feature == "-sse"
+            {
                 return None;
             }
 
             if feature.starts_with('-') {
                 Some(format!("no{}", feature))
-            }
-            else if feature.starts_with('+') {
+            } else if feature.starts_with('+') {
                 Some(feature[1..].to_string())
-            }
-            else {
+            } else {
                 Some(feature.to_string())
             }
         })
         .collect::<Vec<_>>()
         .join(",");
     if !target_features.is_empty() {
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         func.add_attribute(FnAttribute::Target(&target_features));
     }
 }
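
The target-feature loop reformatted above converts rustc feature strings into GCC spelling: a leading '-' gains a "no" prefix (so "-avx" becomes "no-avx"), a leading '+' is stripped, and soft-float, retpoline-external-thunk, and -sse are dropped entirely. A minimal sketch of that mapping, with a hypothetical function name:

    // Translate one rustc target-feature string to its GCC form, following
    // the filter_map in from_fn_attrs; None means the feature is skipped.
    fn to_gcc_feature(feature: &str) -> Option<String> {
        if feature.contains("soft-float")
            || feature.contains("retpoline-external-thunk")
            || feature == "-sse"
        {
            return None;
        }
        if feature.starts_with('-') {
            Some(format!("no{}", feature)) // "-avx" -> "no-avx"
        } else if let Some(stripped) = feature.strip_prefix('+') {
            Some(stripped.to_string()) // "+avx" -> "avx"
        } else {
            Some(feature.to_string())
        }
    }

    fn main() {
        assert_eq!(to_gcc_feature("+avx2").as_deref(), Some("avx2"));
        assert_eq!(to_gcc_feature("-avx").as_deref(), Some("no-avx"));
        assert_eq!(to_gcc_feature("-sse"), None);
    }
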
diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs
index c21b7686823..61e0f203ee0 100644
--- a/compiler/rustc_codegen_gcc/src/back/lto.rs
+++ b/compiler/rustc_codegen_gcc/src/back/lto.rs
@@ -1,7 +1,6 @@
 /// GCC requires to use the same toolchain for the whole compilation when doing LTO.
 /// So, we need the same version/commit of the linker (gcc) and lto front-end binaries (lto1,
 /// lto-wrapper, liblto_plugin.so).
-
 // FIXME(antoyo): the executables compiled with LTO are bigger than those compiled without LTO.
 // Since it is the opposite for cg_llvm, check if this is normal.
 //
@@ -17,7 +16,6 @@
 // /usr/bin/ld: warning: type of symbol `_RNvNvNvNtCs5JWOrf9uCus_5rayon11thread_pool19WORKER_THREAD_STATE7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
 // /usr/bin/ld: warning: type of symbol `_RNvNvNvNvNtNtNtCsAj5i4SGTR7_3std4sync4mpmc5waker17current_thread_id5DUMMY7___getit5___KEY' changed from 1 to 6 in /tmp/ccKeUSiR.ltrans0.ltrans.o
 // /usr/bin/ld: warning: incremental linking of LTO and non-LTO objects; using -flinker-output=nolto-rel which will bypass whole program optimization
-
 use std::ffi::CString;
 use std::fs::{self, File};
 use std::path::{Path, PathBuf};
@@ -30,18 +28,16 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind};
 use rustc_data_structures::memmap::Mmap;
-use rustc_errors::{FatalError, DiagCtxt};
+use rustc_errors::{DiagCtxt, FatalError};
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::dep_graph::WorkProduct;
 use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel};
 use rustc_session::config::{CrateType, Lto};
-use tempfile::{TempDir, tempdir};
+use tempfile::{tempdir, TempDir};
 
 use crate::back::write::save_temp_bitcode;
-use crate::errors::{
-    DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib,
-};
-use crate::{GccCodegenBackend, GccContext, to_gcc_opt_level};
+use crate::errors::{DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib};
+use crate::{to_gcc_opt_level, GccCodegenBackend, GccContext};
 
 /// We keep track of the computed LTO cache keys from the previous
 /// session to determine which CGUs we can reuse.
@@ -61,7 +57,10 @@ struct LtoData {
     tmp_path: TempDir,
 }
 
-fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Result<LtoData, FatalError> {
+fn prepare_lto(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    dcx: &DiagCtxt,
+) -> Result<LtoData, FatalError> {
     let export_threshold = match cgcx.lto {
         // We're just doing LTO for our one crate
         Lto::ThinLocal => SymbolExportLevel::Rust,
@@ -72,14 +71,13 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
         Lto::No => panic!("didn't request LTO but we're doing LTO"),
     };
 
-    let tmp_path =
-        match tempdir() {
-            Ok(tmp_path) => tmp_path,
-            Err(error) => {
-                eprintln!("Cannot create temporary directory: {}", error);
-                return Err(FatalError);
-            },
-        };
+    let tmp_path = match tempdir() {
+        Ok(tmp_path) => tmp_path,
+        Err(error) => {
+            eprintln!("Cannot create temporary directory: {}", error);
+            return Err(FatalError);
+        }
+    };
 
     let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| {
         if info.level.is_below_threshold(export_threshold) || info.used {
@@ -108,11 +106,10 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
             if !crate_type_allows_lto(*crate_type) {
                 dcx.emit_err(LtoDisallowed);
                 return Err(FatalError);
-            } else if *crate_type == CrateType::Dylib {
-                if !cgcx.opts.unstable_opts.dylib_lto {
-                    dcx.emit_err(LtoDylib);
-                    return Err(FatalError);
-                }
+            }
+            if *crate_type == CrateType::Dylib && !cgcx.opts.unstable_opts.dylib_lto {
+                dcx.emit_err(LtoDylib);
+                return Err(FatalError);
             }
         }
 
@@ -125,8 +122,7 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
             let exported_symbols =
                 cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO");
             {
-                let _timer =
-                    cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
+                let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold");
                 symbols_below_threshold
                     .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter));
             }
@@ -170,10 +166,9 @@ fn prepare_lto(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt) -> Resu
 }
 
 fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> {
-    fs::write(path, obj)
-        .map_err(|error| LtoBitcodeFromRlib {
-            gcc_err: format!("write object file to temp dir: {}", error)
-        })
+    fs::write(path, obj).map_err(|error| LtoBitcodeFromRlib {
+        gcc_err: format!("write object file to temp dir: {}", error),
+    })
 }
 
 /// Performs fat LTO by merging all modules into a single one and returning it
@@ -186,13 +181,25 @@ pub(crate) fn run_fat(
     let dcx = cgcx.create_dcx();
     let lto_data = prepare_lto(cgcx, &dcx)?;
     /*let symbols_below_threshold =
-        lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
-    fat_lto(cgcx, &dcx, modules, cached_modules, lto_data.upstream_modules, lto_data.tmp_path,
+    lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/
+    fat_lto(
+        cgcx,
+        &dcx,
+        modules,
+        cached_modules,
+        lto_data.upstream_modules,
+        lto_data.tmp_path,
         //&symbols_below_threshold,
     )
 }
 
-fn fat_lto(cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, modules: Vec<FatLtoInput<GccCodegenBackend>>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, tmp_path: TempDir,
+fn fat_lto(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    _dcx: &DiagCtxt,
+    modules: Vec<FatLtoInput<GccCodegenBackend>>,
+    cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>,
+    mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>,
+    tmp_path: TempDir,
     //symbols_below_threshold: &[*const libc::c_char],
 ) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> {
     let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module");
@@ -298,10 +305,15 @@ fn fat_lto(cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, modules: V
             match bc_decoded {
                 SerializedModule::Local(ref module_buffer) => {
                     module.module_llvm.should_combine_object_files = true;
-                    module.module_llvm.context.add_driver_option(module_buffer.0.to_str().expect("path"));
-                },
+                    module
+                        .module_llvm
+                        .context
+                        .add_driver_option(module_buffer.0.to_str().expect("path"));
+                }
                 SerializedModule::FromRlib(_) => unimplemented!("from rlib"),
-                SerializedModule::FromUncompressedFile(_) => unimplemented!("from uncompressed file"),
+                SerializedModule::FromUncompressedFile(_) => {
+                    unimplemented!("from uncompressed file")
+                }
             }
             serialized_bitcode.push(bc_decoded);
         }
@@ -309,13 +321,13 @@ fn fat_lto(cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, modules: V
 
         // Internalize everything below threshold to help strip out more modules and such.
         /*unsafe {
-            let ptr = symbols_below_threshold.as_ptr();
-            llvm::LLVMRustRunRestrictionPass(
-                llmod,
-                ptr as *const *const libc::c_char,
-                symbols_below_threshold.len() as libc::size_t,
-            );*/
-            save_temp_bitcode(cgcx, &module, "lto.after-restriction");
+        let ptr = symbols_below_threshold.as_ptr();
+        llvm::LLVMRustRunRestrictionPass(
+            llmod,
+            ptr as *const *const libc::c_char,
+            symbols_below_threshold.len() as libc::size_t,
+        );*/
+        save_temp_bitcode(cgcx, &module, "lto.after-restriction");
         //}
     }
 
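prepare_lto, shown above, refuses LTO for crate types that cannot participate in it, and for dylib outputs unless -Zdylib-lto is enabled (the hunk flattens the old else-if into two separate checks). A condensed sketch of that gating under stated assumptions: the error strings are placeholders for the real LtoDisallowed/LtoDylib diagnostics, and the CrateType enum is reduced:

    // Condensed model of the crate-type checks in prepare_lto; the real
    // code emits structured diagnostics rather than strings.
    #[derive(PartialEq)]
    enum CrateType { Executable, Rlib, Dylib, ProcMacro }

    fn crate_type_allows_lto(ct: &CrateType) -> bool {
        // Simplification: proc-macros stand in for all disallowed types.
        !matches!(ct, CrateType::ProcMacro)
    }

    fn check_lto(crate_types: &[CrateType], dylib_lto: bool) -> Result<(), &'static str> {
        for ct in crate_types {
            if !crate_type_allows_lto(ct) {
                return Err("LTO is not allowed for this crate type");
            }
            if *ct == CrateType::Dylib && !dylib_lto {
                return Err("LTO on a dylib requires -Zdylib-lto");
            }
        }
        Ok(())
    }

    fn main() {
        assert!(check_lto(&[CrateType::Dylib], false).is_err());
        assert!(check_lto(&[CrateType::Rlib, CrateType::Executable], false).is_ok());
    }
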
diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs
index 2f8a54f529c..76a619a1af7 100644
--- a/compiler/rustc_codegen_gcc/src/back/write.rs
+++ b/compiler/rustc_codegen_gcc/src/back/write.rs
@@ -1,19 +1,24 @@
 use std::{env, fs};
 
 use gccjit::OutputKind;
-use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
 use rustc_codegen_ssa::back::link::ensure_removed;
 use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig};
+use rustc_codegen_ssa::{CompiledModule, ModuleCodegen};
 use rustc_errors::DiagCtxt;
 use rustc_fs_util::link_or_copy;
 use rustc_session::config::OutputType;
 use rustc_span::fatal_error::FatalError;
 use rustc_target::spec::SplitDebuginfo;
 
-use crate::{GccCodegenBackend, GccContext};
 use crate::errors::CopyBitcode;
+use crate::{GccCodegenBackend, GccContext};
 
-pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &DiagCtxt, module: ModuleCodegen<GccContext>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+pub(crate) unsafe fn codegen(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    dcx: &DiagCtxt,
+    module: ModuleCodegen<GccContext>,
+    config: &ModuleConfig,
+) -> Result<CompiledModule, FatalError> {
     let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name);
     {
         let context = &module.module_llvm.context;
@@ -51,7 +56,8 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
                     .generic_activity_with_arg("GCC_module_codegen_emit_bitcode", &*module.name);
                 context.add_command_line_option("-flto=auto");
                 context.add_command_line_option("-flto-partition=one");
-                context.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
+                context
+                    .compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
             }
 
             if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) {
@@ -65,18 +71,19 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
                 context.add_command_line_option("-flto-partition=one");
                 context.add_command_line_option("-ffat-lto-objects");
                 // TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument).
-                context.compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
+                context
+                    .compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str"));
             }
         }
 
         if config.emit_ir {
-            unimplemented!();
+            let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
+            std::fs::write(out, "").expect("write file");
         }
 
         if config.emit_asm {
-            let _timer = cgcx
-                .prof
-                .generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name);
+            let _timer =
+                cgcx.prof.generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name);
             let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
             context.compile_to_file(OutputKind::Assembler, path.to_str().expect("path to str"));
         }
@@ -89,7 +96,9 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
                 if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") {
                     println!("Module {}", module.name);
                 }
-                if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1") || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name) {
+                if env::var("CG_GCCJIT_DUMP_ALL_MODULES").as_deref() == Ok("1")
+                    || env::var("CG_GCCJIT_DUMP_MODULE").as_deref() == Ok(&module.name)
+                {
                     println!("Dumping reproducer {}", module.name);
                     let _ = fs::create_dir("/tmp/reproducers");
                     // FIXME(antoyo): segfault in dump_reproducer_to_file() might be caused by
@@ -117,10 +126,15 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
                     context.add_driver_option("-fuse-linker-plugin");
 
                     // NOTE: this doesn't actually generate an executable. With the above flags, it combines the .o files together in another .o.
-                    context.compile_to_file(OutputKind::Executable, obj_out.to_str().expect("path to str"));
-                }
-                else {
-                    context.compile_to_file(OutputKind::ObjectFile, obj_out.to_str().expect("path to str"));
+                    context.compile_to_file(
+                        OutputKind::Executable,
+                        obj_out.to_str().expect("path to str"),
+                    );
+                } else {
+                    context.compile_to_file(
+                        OutputKind::ObjectFile,
+                        obj_out.to_str().expect("path to str"),
+                    );
                 }
             }
 
@@ -148,11 +162,19 @@ pub(crate) unsafe fn codegen(cgcx: &CodegenContext<GccCodegenBackend>, dcx: &Dia
     ))
 }
 
-pub(crate) fn link(_cgcx: &CodegenContext<GccCodegenBackend>, _dcx: &DiagCtxt, mut _modules: Vec<ModuleCodegen<GccContext>>) -> Result<ModuleCodegen<GccContext>, FatalError> {
+pub(crate) fn link(
+    _cgcx: &CodegenContext<GccCodegenBackend>,
+    _dcx: &DiagCtxt,
+    mut _modules: Vec<ModuleCodegen<GccContext>>,
+) -> Result<ModuleCodegen<GccContext>, FatalError> {
     unimplemented!();
 }
 
-pub(crate) fn save_temp_bitcode(cgcx: &CodegenContext<GccCodegenBackend>, _module: &ModuleCodegen<GccContext>, _name: &str) {
+pub(crate) fn save_temp_bitcode(
+    cgcx: &CodegenContext<GccCodegenBackend>,
+    _module: &ModuleCodegen<GccContext>,
+    _name: &str,
+) {
     if !cgcx.save_temps {
         return;
     }
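
As an aside, the fat-LTO path in the hunks above boils down to three libgccjit command-line options. A minimal sketch using only the gccjit crate APIs already visible in this diff (Context::default() is assumed as the constructor; this is not the backend's actual driver code):

    use gccjit::{Context, OutputKind};

    // Emit a single .o carrying both GIMPLE bytecode and machine code,
    // mirroring the EmitObj::ObjectCode(BitcodeSection::Full) branch above.
    fn emit_fat_lto_object(path: &str) {
        let context = Context::default();
        context.add_command_line_option("-flto=auto");
        context.add_command_line_option("-flto-partition=one");
        context.add_command_line_option("-ffat-lto-objects");
        context.compile_to_file(OutputKind::ObjectFile, path);
    }
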
diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs
index b0788718da4..2a2d5741d13 100644
--- a/compiler/rustc_codegen_gcc/src/base.rs
+++ b/compiler/rustc_codegen_gcc/src/base.rs
@@ -2,29 +2,26 @@ use std::collections::HashSet;
 use std::env;
 use std::time::Instant;
 
-use gccjit::{
-    FunctionType,
-    GlobalKind,
-};
-use rustc_middle::dep_graph;
-use rustc_middle::ty::TyCtxt;
-#[cfg(feature="master")]
-use rustc_middle::mir::mono::Visibility;
-use rustc_middle::mir::mono::Linkage;
-use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use gccjit::{FunctionType, GlobalKind};
 use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
 use rustc_codegen_ssa::mono_item::MonoItemExt;
 use rustc_codegen_ssa::traits::DebugInfoMethods;
+use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
+use rustc_middle::dep_graph;
+use rustc_middle::mir::mono::Linkage;
+#[cfg(feature = "master")]
+use rustc_middle::mir::mono::Visibility;
+use rustc_middle::ty::TyCtxt;
 use rustc_session::config::DebugInfo;
 use rustc_span::Symbol;
 use rustc_target::spec::PanicStrategy;
 
-use crate::{LockedTargetInfo, gcc_util, new_context};
-use crate::GccContext;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
+use crate::GccContext;
+use crate::{gcc_util, new_context, LockedTargetInfo};
 
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 pub fn visibility_to_gcc(linkage: Visibility) -> gccjit::Visibility {
     match linkage {
         Visibility::Default => gccjit::Visibility::Default,
@@ -66,7 +63,11 @@ pub fn linkage_to_gcc(linkage: Linkage) -> FunctionType {
     }
 }
 
-pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: LockedTargetInfo) -> (ModuleCodegen<GccContext>, u64) {
+pub fn compile_codegen_unit(
+    tcx: TyCtxt<'_>,
+    cgu_name: Symbol,
+    target_info: LockedTargetInfo,
+) -> (ModuleCodegen<GccContext>, u64) {
     let prof_timer = tcx.prof.generic_activity("codegen_module");
     let start_time = Instant::now();
 
@@ -85,7 +86,10 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
     // the time we needed for codegenning it.
     let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
 
-    fn module_codegen(tcx: TyCtxt<'_>, (cgu_name, target_info): (Symbol, LockedTargetInfo)) -> ModuleCodegen<GccContext> {
+    fn module_codegen(
+        tcx: TyCtxt<'_>,
+        (cgu_name, target_info): (Symbol, LockedTargetInfo),
+    ) -> ModuleCodegen<GccContext> {
         let cgu = tcx.codegen_unit(cgu_name);
         // Instantiate monomorphizations without filling out definitions yet...
         let context = new_context(tcx);
@@ -95,7 +99,12 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
             context.add_driver_option("-fexceptions");
         }
 
-        let disabled_features: HashSet<_> = tcx.sess.opts.cg.target_feature.split(',')
+        let disabled_features: HashSet<_> = tcx
+            .sess
+            .opts
+            .cg
+            .target_feature
+            .split(',')
             .filter(|feature| feature.starts_with('-'))
             .map(|string| &string[1..])
             .collect();
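
The reformatted chain above is easy to check in isolation. A self-contained sketch of the same filter (the function name is hypothetical):

    use std::collections::HashSet;

    // "+sse2,-avx" -> {"avx"}: keep only the '-'-prefixed entries,
    // with the sign stripped, exactly like the iterator chain above.
    fn disabled_features(target_feature: &str) -> HashSet<&str> {
        target_feature
            .split(',')
            .filter(|feature| feature.starts_with('-'))
            .map(|feature| &feature[1..])
            .collect()
    }

    fn main() {
        assert_eq!(disabled_features("+sse2,-avx"), HashSet::from(["avx"]));
    }
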
@@ -129,7 +138,13 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
             context.add_command_line_option(&format!("-march={}", target_cpu));
         }
 
-        if tcx.sess.opts.unstable_opts.function_sections.unwrap_or(tcx.sess.target.function_sections) {
+        if tcx
+            .sess
+            .opts
+            .unstable_opts
+            .function_sections
+            .unwrap_or(tcx.sess.target.function_sections)
+        {
             context.add_command_line_option("-ffunction-sections");
             context.add_command_line_option("-fdata-sections");
         }
@@ -152,19 +167,17 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
         if env::var("CG_GCCJIT_DUMP_GIMPLE").as_deref() == Ok("1") {
             context.set_dump_initial_gimple(true);
         }
-        context.set_debug_info(true);
         if env::var("CG_GCCJIT_DUMP_EVERYTHING").as_deref() == Ok("1") {
             context.set_dump_everything(true);
         }
         if env::var("CG_GCCJIT_KEEP_INTERMEDIATES").as_deref() == Ok("1") {
             context.set_keep_intermediates(true);
         }
-
         if env::var("CG_GCCJIT_VERBOSE").as_deref() == Ok("1") {
             context.add_driver_option("-v");
         }
 
-        // NOTE: The codegen generates unrechable blocks.
+        // NOTE: The codegen generates unreachable blocks.
         context.set_allow_unreachable_blocks(true);
 
         {
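
The env::var(..).as_deref() == Ok("1") pattern repeated throughout this block compares without allocating and treats an unset variable like any non-"1" value. Factored out as a hypothetical helper:

    // True only when the variable is set to exactly "1".
    fn env_flag(name: &str) -> bool {
        std::env::var(name).as_deref() == Ok("1")
    }
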
@@ -192,11 +205,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol, target_info: Lock
 
         ModuleCodegen {
             name: cgu_name.to_string(),
-            module_llvm: GccContext {
-                context,
-                should_combine_object_files: false,
-                temp_dir: None,
-            },
+            module_llvm: GccContext { context, should_combine_object_files: false, temp_dir: None },
             kind: ModuleKind::Regular,
         }
     }
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 71a0a4c2e96..f5cda81f6ab 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -4,53 +4,36 @@ use std::convert::TryFrom;
 use std::ops::Deref;
 
 use gccjit::{
-    BinaryOp,
-    Block,
-    ComparisonOp,
-    Context,
-    Function,
-    LValue,
-    RValue,
-    ToRValue,
-    Type,
+    BinaryOp, Block, ComparisonOp, Context, Function, LValue, Location, RValue, ToRValue, Type,
     UnaryOp,
 };
 use rustc_apfloat::{ieee, Float, Round, Status};
-use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::common::{
     AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
 };
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::{
-    BackendTypes,
-    BaseTypeMethods,
-    BuilderMethods,
-    ConstMethods,
-    LayoutTypeMethods,
-    HasCodegen,
-    OverflowOp,
-    StaticBuilderMethods,
+    BackendTypes, BaseTypeMethods, BuilderMethods, ConstMethods, HasCodegen, LayoutTypeMethods,
+    OverflowOp, StaticBuilderMethods,
 };
+use rustc_codegen_ssa::MemFlags;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers,
+    TyAndLayout,
+};
 use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
-use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
-use rustc_span::Span;
 use rustc_span::def_id::DefId;
+use rustc_span::Span;
 use rustc_target::abi::{
-    self,
-    call::FnAbi,
-    Align,
-    HasDataLayout,
-    Size,
-    TargetDataLayout,
-    WrappingRange,
+    self, call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout, WrappingRange,
 };
 use rustc_target::spec::{HasTargetSpec, Target};
 
-use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::common::{type_is_pointer, SignType, TypeReflection};
 use crate::context::CodegenCx;
 use crate::intrinsic::llvm;
 use crate::type_of::LayoutGccExt;
@@ -70,54 +53,74 @@ pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
     pub cx: &'a CodegenCx<'gcc, 'tcx>,
     pub block: Block<'gcc>,
     stack_var_count: Cell<usize>,
+    pub location: Option<Location<'gcc>>,
 }
 
 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
-        Builder {
-            cx,
-            block,
-            stack_var_count: Cell::new(0),
-        }
+        Builder { cx, block, stack_var_count: Cell::new(0), location: None }
     }
 
-    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+    fn atomic_extremum(
+        &mut self,
+        operation: ExtremumOperation,
+        dst: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+    ) -> RValue<'gcc> {
         let size = src.get_type().get_size();
 
         let func = self.current_func();
 
-        let load_ordering =
-            match order {
-                // TODO(antoyo): does this make sense?
-                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
-                _ => order,
-            };
-        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
-        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
-        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
-        self.llbb().add_assignment(None, previous_var, previous_value);
-        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+        let load_ordering = match order {
+            // TODO(antoyo): does this make sense?
+            AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+            _ => order,
+        };
+        let previous_value =
+            self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
+        let previous_var =
+            func.new_local(self.location, previous_value.get_type(), "previous_value");
+        let return_value = func.new_local(self.location, previous_value.get_type(), "return_value");
+        self.llbb().add_assignment(self.location, previous_var, previous_value);
+        self.llbb().add_assignment(self.location, return_value, previous_var.to_rvalue());
 
         let while_block = func.new_block("while");
         let after_block = func.new_block("after_while");
-        self.llbb().end_with_jump(None, while_block);
+        self.llbb().end_with_jump(self.location, while_block);
 
         // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
         // state needs to be updated.
         self.switch_to_block(while_block);
 
-        let comparison_operator =
-            match operation {
-                ExtremumOperation::Max => ComparisonOp::LessThan,
-                ExtremumOperation::Min => ComparisonOp::GreaterThan,
-            };
-
-        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
-        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
-        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
-        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+        let comparison_operator = match operation {
+            ExtremumOperation::Max => ComparisonOp::LessThan,
+            ExtremumOperation::Min => ComparisonOp::GreaterThan,
+        };
 
-        while_block.end_with_conditional(None, cond, while_block, after_block);
+        let cond1 = self.context.new_comparison(
+            self.location,
+            comparison_operator,
+            previous_var.to_rvalue(),
+            self.context.new_cast(self.location, src, previous_value.get_type()),
+        );
+        let compare_exchange =
+            self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+        let cond2 = self.cx.context.new_unary_op(
+            self.location,
+            UnaryOp::LogicalNegate,
+            compare_exchange.get_type(),
+            compare_exchange,
+        );
+        let cond = self.cx.context.new_binary_op(
+            self.location,
+            BinaryOp::LogicalAnd,
+            self.cx.bool_type,
+            cond1,
+            cond2,
+        );
+
+        while_block.end_with_conditional(self.location, cond, while_block, after_block);
 
         // NOTE: since jumps were added in a place rustc does not expect, the current block in the
         // state needs to be updated.
@@ -126,29 +129,48 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         return_value.to_rvalue()
     }
 
-    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+    fn compare_exchange(
+        &self,
+        dst: RValue<'gcc>,
+        cmp: LValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
+        weak: bool,
+    ) -> RValue<'gcc> {
         let size = src.get_type().get_size();
-        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
+        let compare_exchange =
+            self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
         let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
         let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
 
         let void_ptr_type = self.context.new_type::<*mut ()>();
         let volatile_void_ptr_type = void_ptr_type.make_volatile();
-        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
-        let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
+        let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
+        let expected =
+            self.context.new_cast(self.location, cmp.get_address(self.location), void_ptr_type);
 
         // NOTE: not sure why, but we have the wrong type here.
         let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
-        let src = self.context.new_cast(None, src, int_type);
-        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+        let src = self.context.new_cast(self.location, src, int_type);
+        self.context.new_call(
+            self.location,
+            compare_exchange,
+            &[dst, expected, src, weak, order, failure_order],
+        )
     }
 
     pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
-        self.llbb().add_assignment(None, lvalue, value);
+        self.llbb().add_assignment(self.location, lvalue, value);
     }
 
-    fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+    fn check_call<'b>(
+        &mut self,
+        _typ: &str,
+        func: Function<'gcc>,
+        args: &'b [RValue<'gcc>],
+    ) -> Cow<'b, [RValue<'gcc>]> {
         let mut all_args_match = true;
         let mut param_types = vec![];
         let param_count = func.get_param_count();
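
Every gccjit call in these hunks now receives self.location instead of a hard-coded None, which is how source positions reach the generated GIMPLE once debug info is enabled. A hedged sketch of where such a value comes from, assuming the gccjit crate's new_location wrapper over gcc_jit_context_new_location:

    use gccjit::{Context, Location};

    // Some(..) when debug info is on, None otherwise; gccjit accepts
    // Option<Location> in every position these hunks thread it into.
    fn make_location<'gcc>(
        context: &'gcc Context<'gcc>,
        file: &str,
        line: i32,
        column: i32,
    ) -> Option<Location<'gcc>> {
        Some(context.new_location(file, line, column))
    }
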
@@ -173,8 +195,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 let actual_ty = actual_val.get_type();
                 if expected_ty != actual_ty {
                     self.bitcast(actual_val, expected_ty)
-                }
-                else {
+                } else {
                     actual_val
                 }
             })
@@ -185,7 +206,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         Cow::Owned(casted_args)
     }
 
-    fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+    fn check_ptr_call<'b>(
+        &mut self,
+        _typ: &str,
+        func_ptr: RValue<'gcc>,
+        args: &'b [RValue<'gcc>],
+    ) -> Cow<'b, [RValue<'gcc>]> {
         let mut all_args_match = true;
         let mut param_types = vec![];
         let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
@@ -219,20 +245,32 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
                 let actual_ty = actual_val.get_type();
                 if expected_ty != actual_ty {
-                    if !actual_ty.is_vector() && !expected_ty.is_vector() && (actual_ty.is_integral() && expected_ty.is_integral()) || (actual_ty.get_pointee().is_some() && expected_ty.get_pointee().is_some()) {
-                        self.context.new_cast(None, actual_val, expected_ty)
-                    }
-                    else if on_stack_param_indices.contains(&index) {
-                        actual_val.dereference(None).to_rvalue()
-                    }
-                    else {
-                        assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index);
+                    if !actual_ty.is_vector()
+                        && !expected_ty.is_vector()
+                        && (actual_ty.is_integral() && expected_ty.is_integral())
+                        || (actual_ty.get_pointee().is_some()
+                            && expected_ty.get_pointee().is_some())
+                    {
+                        self.context.new_cast(self.location, actual_val, expected_ty)
+                    } else if on_stack_param_indices.contains(&index) {
+                        actual_val.dereference(self.location).to_rvalue()
+                    } else {
+                        assert!(
+                            !((actual_ty.is_vector() && !expected_ty.is_vector())
+                                || (!actual_ty.is_vector() && expected_ty.is_vector())),
+                            "{:?} ({}) -> {:?} ({}), index: {:?}[{}]",
+                            actual_ty,
+                            actual_ty.is_vector(),
+                            expected_ty,
+                            expected_ty.is_vector(),
+                            func_ptr,
+                            index
+                        );
                         // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
                         // TODO: remove bitcast now that vector types can be compared?
                         self.bitcast(actual_val, expected_ty)
                     }
-                }
-                else {
+                } else {
                     actual_val
                 }
             })
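
The branch structure above encodes a three-way fixup policy for mismatched argument types. A hypothetical distillation (names invented, the vector assertion omitted):

    enum Fixup {
        Cast,    // integer-to-integer or pointer-to-pointer mismatch
        Deref,   // argument passed on the stack
        Bitcast, // everything else
    }

    fn pick_fixup(both_integral: bool, both_pointers: bool, on_stack: bool) -> Fixup {
        if both_integral || both_pointers {
            Fixup::Cast
        } else if on_stack {
            Fixup::Deref
        } else {
            Fixup::Bitcast
        }
    }
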
@@ -256,7 +294,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.block.get_function()
     }
 
-    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    fn function_call(
+        &mut self,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         // TODO(antoyo): remove when the API supports a different type for functions.
         let func: Function<'gcc> = self.cx.rvalue_as_function(func);
         let args = self.check_call("call", func, args);
@@ -268,35 +311,54 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let current_func = self.block.get_function();
         if return_type != void_type {
             unsafe { RETURN_VALUE_COUNT += 1 };
-            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
-            self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+            let result = current_func.new_local(
+                self.location,
+                return_type,
+                &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }),
+            );
+            self.block.add_assignment(
+                self.location,
+                result,
+                self.cx.context.new_call(self.location, func, &args),
+            );
             result.to_rvalue()
-        }
-        else {
-            self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
+        } else {
+            self.block
+                .add_eval(self.location, self.cx.context.new_call(self.location, func, &args));
             // Return a dummy value when there is no return value.
             self.context.new_rvalue_from_long(self.isize_type, 0)
         }
     }
 
-    fn function_ptr_call(&mut self, typ: Type<'gcc>, mut func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
-        let gcc_func =
-            match func_ptr.get_type().dyncast_function_ptr_type() {
-                Some(func) => func,
-                None => {
-                    // NOTE: due to opaque pointers now being used, we need to cast here.
-                    let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr");
-                    func_ptr = self.context.new_cast(None, func_ptr, typ);
-                    new_func_type
-                },
-            };
+    fn function_ptr_call(
+        &mut self,
+        typ: Type<'gcc>,
+        mut func_ptr: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
+        let gcc_func = match func_ptr.get_type().dyncast_function_ptr_type() {
+            Some(func) => func,
+            None => {
+                // NOTE: due to opaque pointers now being used, we need to cast here.
+                let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr");
+                func_ptr = self.context.new_cast(self.location, func_ptr, typ);
+                new_func_type
+            }
+        };
         let func_name = format!("{:?}", func_ptr);
         let previous_arg_count = args.len();
         let orig_args = args;
         let args = {
             let function_address_names = self.function_address_names.borrow();
             let original_function_name = function_address_names.get(&func_ptr);
-            llvm::adjust_intrinsic_arguments(&self, gcc_func, args.into(), &func_name, original_function_name)
+            llvm::adjust_intrinsic_arguments(
+                &self,
+                gcc_func,
+                args.into(),
+                &func_name,
+                original_function_name,
+            )
         };
         let args_adjusted = args.len() != previous_arg_count;
         let args = self.check_ptr_call("call", func_ptr, &*args);
@@ -309,39 +371,78 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         if return_type != void_type {
             unsafe { RETURN_VALUE_COUNT += 1 };
-            let return_value = self.cx.context.new_call_through_ptr(None, func_ptr, &args);
-            let return_value = llvm::adjust_intrinsic_return_value(&self, return_value, &func_name, &args, args_adjusted, orig_args);
-            let result = current_func.new_local(None, return_value.get_type(), &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
-            self.block.add_assignment(None, result, return_value);
+            let return_value = self.cx.context.new_call_through_ptr(self.location, func_ptr, &args);
+            let return_value = llvm::adjust_intrinsic_return_value(
+                &self,
+                return_value,
+                &func_name,
+                &args,
+                args_adjusted,
+                orig_args,
+            );
+            let result = current_func.new_local(
+                self.location,
+                return_value.get_type(),
+                &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }),
+            );
+            self.block.add_assignment(self.location, result, return_value);
             result.to_rvalue()
-        }
-        else {
-            #[cfg(not(feature="master"))]
+        } else {
+            #[cfg(not(feature = "master"))]
             if gcc_func.get_param_count() == 0 {
                 // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
-                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
-            }
-            else {
-                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+                self.block.add_eval(
+                    self.location,
+                    self.cx.context.new_call_through_ptr(self.location, func_ptr, &[]),
+                );
+            } else {
+                self.block.add_eval(
+                    self.location,
+                    self.cx.context.new_call_through_ptr(self.location, func_ptr, &args),
+                );
             }
-            #[cfg(feature="master")]
-            self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            #[cfg(feature = "master")]
+            self.block.add_eval(
+                self.location,
+                self.cx.context.new_call_through_ptr(self.location, func_ptr, &args),
+            );
             // Return a dummy value when there is no return value.
-            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
-            self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+            let result = current_func.new_local(
+                self.location,
+                self.isize_type,
+                "dummyValueThatShouldNeverBeUsed",
+            );
+            self.block.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_from_long(self.isize_type, 0),
+            );
             result.to_rvalue()
         }
     }
 
-    pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    pub fn overflow_call(
+        &self,
+        func: Function<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         // gccjit requires the result of function calls to be used, even when it's not otherwise needed.
         // That's why we assign the result to a local.
         let return_type = self.context.new_type::<bool>();
         let current_func = self.block.get_function();
         // TODO(antoyo): return the new_call() directly, since the overflow function has no side effects?
         unsafe { RETURN_VALUE_COUNT += 1 };
-        let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
-        self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+        let result = current_func.new_local(
+            self.location,
+            return_type,
+            &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }),
+        );
+        self.block.add_assignment(
+            self.location,
+            result,
+            self.cx.context.new_call(self.location, func, &args),
+        );
         result.to_rvalue()
     }
 }
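
For context, the overflow builtins wrapped here behave like Rust's overflowing_* family; the named local exists only because gccjit insists that call results be consumed. A minimal illustration of the value/flag pair:

    // The bool plays the role of the builtin's boolean result.
    fn add_with_overflow(a: u32, b: u32) -> (u32, bool) {
        a.overflowing_add(b)
    }
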
@@ -405,6 +506,17 @@ impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
     type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
 }
 
+fn set_rvalue_location<'a, 'gcc, 'tcx>(
+    bx: &mut Builder<'a, 'gcc, 'tcx>,
+    rvalue: RValue<'gcc>,
+) -> RValue<'gcc> {
+    if bx.location.is_some() {
+        #[cfg(feature = "master")]
+        rvalue.set_location(bx.location.unwrap());
+    }
+    rvalue
+}
+
 impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> {
         Builder::with_cx(cx, block)
@@ -429,43 +541,58 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn ret_void(&mut self) {
-        self.llbb().end_with_void_return(None)
+        self.llbb().end_with_void_return(self.location)
     }
 
     fn ret(&mut self, mut value: RValue<'gcc>) {
         if self.structs_as_pointer.borrow().contains(&value) {
             // NOTE: hack to work around a limitation of the rustc API: see comment on
             // CodegenCx.structs_as_pointer
-            value = value.dereference(None).to_rvalue();
+            value = value.dereference(self.location).to_rvalue();
         }
         let expected_return_type = self.current_func().get_return_type();
         if !expected_return_type.is_compatible_with(value.get_type()) {
             // NOTE: due to opaque pointers now being used, we need to cast here.
-            value = self.context.new_cast(None, value, expected_return_type);
+            value = self.context.new_cast(self.location, value, expected_return_type);
         }
-        self.llbb().end_with_return(None, value);
+        self.llbb().end_with_return(self.location, value);
     }
 
     fn br(&mut self, dest: Block<'gcc>) {
-        self.llbb().end_with_jump(None, dest)
+        self.llbb().end_with_jump(self.location, dest)
     }
 
     fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
-        self.llbb().end_with_conditional(None, cond, then_block, else_block)
+        self.llbb().end_with_conditional(self.location, cond, then_block, else_block)
     }
 
-    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+    fn switch(
+        &mut self,
+        value: RValue<'gcc>,
+        default_block: Block<'gcc>,
+        cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>,
+    ) {
         let mut gcc_cases = vec![];
         let typ = self.val_ty(value);
         for (on_val, dest) in cases {
             let on_val = self.const_uint_big(typ, on_val);
             gcc_cases.push(self.context.new_case(on_val, on_val, dest));
         }
-        self.block.end_with_switch(None, value, default_block, &gcc_cases);
+        self.block.end_with_switch(self.location, value, default_block, &gcc_cases);
     }
 
-    #[cfg(feature="master")]
-    fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: Option<&CodegenFnAttrs>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    #[cfg(feature = "master")]
+    fn invoke(
+        &mut self,
+        typ: Type<'gcc>,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        then: Block<'gcc>,
+        catch: Block<'gcc>,
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         let try_block = self.current_func().new_block("try");
 
         let current_block = self.block.clone();
@@ -473,30 +600,39 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let call = self.call(typ, fn_attrs, None, func, args, None); // TODO(antoyo): use funclet here?
         self.block = current_block;
 
-        let return_value = self.current_func()
-            .new_local(None, call.get_type(), "invokeResult");
+        let return_value =
+            self.current_func().new_local(self.location, call.get_type(), "invokeResult");
 
-        try_block.add_assignment(None, return_value, call);
+        try_block.add_assignment(self.location, return_value, call);
 
-        try_block.end_with_jump(None, then);
+        try_block.end_with_jump(self.location, then);
 
         if self.cleanup_blocks.borrow().contains(&catch) {
-            self.block.add_try_finally(None, try_block, catch);
-        }
-        else {
-            self.block.add_try_catch(None, try_block, catch);
+            self.block.add_try_finally(self.location, try_block, catch);
+        } else {
+            self.block.add_try_catch(self.location, try_block, catch);
         }
 
-        self.block.end_with_jump(None, then);
+        self.block.end_with_jump(self.location, then);
 
         return_value.to_rvalue()
     }
 
-    #[cfg(not(feature="master"))]
-    fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    #[cfg(not(feature = "master"))]
+    fn invoke(
+        &mut self,
+        typ: Type<'gcc>,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        then: Block<'gcc>,
+        catch: Block<'gcc>,
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         let call_site = self.call(typ, fn_attrs, None, func, args, None);
         let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
-        self.llbb().end_with_conditional(None, condition, then, catch);
+        self.llbb().end_with_conditional(self.location, condition, then, catch);
         if let Some(_fn_abi) = fn_abi {
             // TODO(bjorn3): Apply function attributes
         }
@@ -505,16 +641,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
     fn unreachable(&mut self) {
         let func = self.context.get_builtin_function("__builtin_unreachable");
-        self.block.add_eval(None, self.context.new_call(None, func, &[]));
+        self.block.add_eval(self.location, self.context.new_call(self.location, func, &[]));
         let return_type = self.block.get_function().get_return_type();
         let void_type = self.context.new_type::<()>();
         if return_type == void_type {
-            self.block.end_with_void_return(None)
-        }
-        else {
-            let return_value = self.current_func()
-                .new_local(None, return_type, "unreachableReturn");
-            self.block.end_with_return(None, return_value)
+            self.block.end_with_void_return(self.location)
+        } else {
+            let return_value =
+                self.current_func().new_local(self.location, return_type, "unreachableReturn");
+            self.block.end_with_return(self.location, return_value)
         }
     }
 
@@ -539,7 +674,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a * b
+        self.cx.context.new_binary_op(self.location, BinaryOp::Mult, a.get_type(), a, b)
     }
 
     fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -564,7 +699,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
         // should be the same.
         let typ = a.get_type().to_signed(self);
-        let b = self.context.new_cast(None, b, typ);
+        let b = self.context.new_cast(self.location, b, typ);
         a / b
     }
 
@@ -606,15 +741,32 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         //     ../../../gcc/gcc/cfgexpand.cc:6069
         // 0x7f0101bf9194 execute
         //     ../../../gcc/gcc/cfgexpand.cc:6795
-        if a.get_type().is_compatible_with(self.cx.float_type) {
+        let a_type = a.get_type();
+        let a_type_unqualified = a_type.unqualified();
+        if a_type.is_compatible_with(self.cx.float_type) {
             let fmodf = self.context.get_builtin_function("fmodf");
             // FIXME(antoyo): this seems to produce the wrong result.
-            return self.context.new_call(None, fmodf, &[a, b]);
+            return self.context.new_call(self.location, fmodf, &[a, b]);
+        }
+        if let Some(vector_type) = a_type_unqualified.dyncast_vector() {
+            assert_eq!(a_type_unqualified, b.get_type().unqualified());
+
+            let num_units = vector_type.get_num_units();
+            let new_elements: Vec<_> = (0..num_units)
+                .map(|i| {
+                    let index = self.context.new_rvalue_from_long(self.cx.type_u32(), i as _);
+                    let x = self.extract_element(a, index).to_rvalue();
+                    let y = self.extract_element(b, index).to_rvalue();
+                    self.frem(x, y)
+                })
+                .collect();
+
+            return self.context.new_rvalue_from_vector(self.location, a_type, &new_elements);
         }
-        assert_eq!(a.get_type().unqualified(), self.cx.double_type);
+        assert_eq!(a_type_unqualified, self.cx.double_type);
 
         let fmod = self.context.get_builtin_function("fmod");
-        return self.context.new_call(None, fmod, &[a, b]);
+        self.context.new_call(self.location, fmod, &[a, b])
     }
 
     fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
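
The new vector branch in frem applies the scalar operation lane by lane. In plain Rust, frem has C fmod semantics (the result takes the sign of the dividend), and the lane-wise version is just a map:

    // Scalar case: Rust's float `%` is fmod.
    fn frem_scalar(a: f64, b: f64) -> f64 {
        a % b
    }

    // Vector case: one fmod per lane, as in the extract/rebuild loop above.
    fn frem_lanes(a: &[f64], b: &[f64]) -> Vec<f64> {
        a.iter().zip(b).map(|(x, y)| x % y).collect()
    }
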
@@ -636,73 +788,78 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.gcc_or(a, b)
+        self.cx.gcc_or(a, b, self.location)
     }
 
     fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_xor(a, b)
+        set_rvalue_location(self, self.gcc_xor(a, b))
     }
 
     fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_neg(a)
+        set_rvalue_location(self, self.gcc_neg(a))
     }
 
     fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+        set_rvalue_location(
+            self,
+            self.cx.context.new_unary_op(self.location, UnaryOp::Minus, a.get_type(), a),
+        )
     }
 
     fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_not(a)
+        set_rvalue_location(self, self.gcc_not(a))
     }
 
     fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_add(a, b)
+        set_rvalue_location(self, self.gcc_add(a, b))
     }
 
     fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_add(a, b)
+        set_rvalue_location(self, self.gcc_add(a, b))
     }
 
     fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_sub(a, b)
+        set_rvalue_location(self, self.gcc_sub(a, b))
     }
 
     fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): should this generate a poison value?
-        self.gcc_sub(a, b)
+        set_rvalue_location(self, self.gcc_sub(a, b))
     }
 
     fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_mul(a, b)
+        set_rvalue_location(self, self.gcc_mul(a, b))
     }
 
     fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_mul(a, b)
+        set_rvalue_location(self, self.gcc_mul(a, b))
     }
 
     fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs + rhs
+        set_rvalue_location(self, lhs + rhs)
     }
 
     fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs - rhs
+        set_rvalue_location(self, lhs - rhs)
     }
 
     fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs * rhs
+        set_rvalue_location(self, lhs * rhs)
     }
 
     fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs / rhs
+        set_rvalue_location(self, lhs / rhs)
     }
 
     fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        self.frem(lhs, rhs)
+        let result = self.frem(lhs, rhs);
+        set_rvalue_location(self, result);
+        result
     }
 
     fn fadd_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
@@ -730,23 +887,33 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.frem(lhs, rhs)
     }
 
-    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        typ: Ty<'_>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value) {
         self.gcc_checked_binop(oop, typ, lhs, rhs)
     }
 
     fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
         // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
         // Ideally, we shouldn't need to do this check.
-        let aligned_type =
-            if ty == self.cx.u128_type || ty == self.cx.i128_type {
-                ty
-            }
-            else {
-                ty.get_aligned(align.bytes())
-            };
+        let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type {
+            ty
+        } else {
+            ty.get_aligned(align.bytes())
+        };
         // TODO(antoyo): It might be better to return a LValue, but fixing the rustc API is non-trivial.
         self.stack_var_count.set(self.stack_var_count.get() + 1);
-        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+        self.current_func()
+            .new_local(
+                self.location,
+                aligned_type,
+                &format!("stack_var_{}", self.stack_var_count.get()),
+            )
+            .get_address(self.location)
     }
 
     fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
@@ -761,48 +928,62 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         // dereference after a drop, for instance.
         // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
         // Ideally, we shouldn't need to do this check.
-        let aligned_type =
-            if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type {
-                pointee_ty
-            }
-            else {
-                pointee_ty.get_aligned(align.bytes())
-            };
-        let ptr = self.context.new_cast(None, ptr, aligned_type.make_pointer());
-        let deref = ptr.dereference(None).to_rvalue();
+        let aligned_type = if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type {
+            pointee_ty
+        } else {
+            pointee_ty.get_aligned(align.bytes())
+        };
+        let ptr = self.context.new_cast(self.location, ptr, aligned_type.make_pointer());
+        let deref = ptr.dereference(self.location).to_rvalue();
         unsafe { RETURN_VALUE_COUNT += 1 };
-        let loaded_value = function.new_local(None, aligned_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
-        block.add_assignment(None, loaded_value, deref);
+        let loaded_value = function.new_local(
+            self.location,
+            aligned_type,
+            &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }),
+        );
+        block.add_assignment(self.location, loaded_value, deref);
         loaded_value.to_rvalue()
     }
 
     fn volatile_load(&mut self, ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
-        let ptr = self.context.new_cast(None, ptr, ty.make_volatile().make_pointer());
-        ptr.dereference(None).to_rvalue()
+        let ptr = self.context.new_cast(self.location, ptr, ty.make_volatile().make_pointer());
+        ptr.dereference(self.location).to_rvalue()
     }
 
-    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
+    fn atomic_load(
+        &mut self,
+        _ty: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> RValue<'gcc> {
         // TODO(antoyo): use ty.
         // TODO(antoyo): handle alignment.
-        let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
+        let atomic_load =
+            self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
         let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 
-        let volatile_const_void_ptr_type = self.context.new_type::<()>()
-            .make_const()
-            .make_volatile()
-            .make_pointer();
-        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
-        self.context.new_call(None, atomic_load, &[ptr, ordering])
+        let volatile_const_void_ptr_type =
+            self.context.new_type::<()>().make_const().make_volatile().make_pointer();
+        let ptr = self.context.new_cast(self.location, ptr, volatile_const_void_ptr_type);
+        self.context.new_call(self.location, atomic_load, &[ptr, ordering])
     }
 
-    fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
+    fn load_operand(
+        &mut self,
+        place: PlaceRef<'tcx, RValue<'gcc>>,
+    ) -> OperandRef<'tcx, RValue<'gcc>> {
         assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
         }
 
-        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
+        fn scalar_load_metadata<'a, 'gcc, 'tcx>(
+            bx: &mut Builder<'a, 'gcc, 'tcx>,
+            load: RValue<'gcc>,
+            scalar: &abi::Scalar,
+        ) {
             let vr = scalar.valid_range(bx);
             match scalar.primitive() {
                 abi::Int(..) => {
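
The __atomic_load_N builtins called above take a pointer and a memory-order constant and return the loaded value; the casts exist only to satisfy their void-pointer signature. The 4-byte case corresponds to this Rust, with the ordering fixed to SeqCst for illustration:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn atomic_load_4(ptr: &AtomicU32) -> u32 {
        ptr.load(Ordering::SeqCst)
    }
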
@@ -817,49 +998,50 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             }
         }
 
-        let val =
-            if let Some(llextra) = place.llextra {
-                OperandValue::Ref(place.llval, Some(llextra), place.align)
-            }
-            else if place.layout.is_gcc_immediate() {
-                let load = self.load(
-                    place.layout.gcc_type(self),
-                    place.llval,
-                    place.align,
-                );
-                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
-                    scalar_load_metadata(self, load, scalar);
-                }
-                OperandValue::Immediate(self.to_immediate(load, place.layout))
+        let val = if let Some(llextra) = place.llextra {
+            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        } else if place.layout.is_gcc_immediate() {
+            let load = self.load(place.layout.gcc_type(self), place.llval, place.align);
+            if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                scalar_load_metadata(self, load, scalar);
             }
-            else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
-                let b_offset = a.size(self).align_to(b.align(self).abi);
-
-                let mut load = |i, scalar: &abi::Scalar, align| {
-                    let llptr = if i == 0 {
-                        place.llval
-                    } else {
-                        self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
-                    };
-                    let llty = place.layout.scalar_pair_element_gcc_type(self, i);
-                    let load = self.load(llty, llptr, align);
-                    scalar_load_metadata(self, load, scalar);
-                    if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
+            OperandValue::Immediate(self.to_immediate(load, place.layout))
+        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+            let b_offset = a.size(self).align_to(b.align(self).abi);
+
+            let mut load = |i, scalar: &abi::Scalar, align| {
+                let llptr = if i == 0 {
+                    place.llval
+                } else {
+                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
                 };
-
-                OperandValue::Pair(
-                    load(0, a, place.align),
-                    load(1, b, place.align.restrict_for_offset(b_offset)),
-                )
-            }
-            else {
-                OperandValue::Ref(place.llval, None, place.align)
+                let llty = place.layout.scalar_pair_element_gcc_type(self, i);
+                let load = self.load(llty, llptr, align);
+                scalar_load_metadata(self, load, scalar);
+                if scalar.is_bool() {
+                    self.trunc(load, self.type_i1())
+                } else {
+                    load
+                }
             };
 
+            OperandValue::Pair(
+                load(0, a, place.align),
+                load(1, b, place.align.restrict_for_offset(b_offset)),
+            )
+        } else {
+            OperandValue::Ref(place.llval, None, place.align)
+        };
+
         OperandRef { val, layout: place.layout }
     }
 
-    fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn write_operand_repeatedly(
+        &mut self,
+        cg_elem: OperandRef<'tcx, RValue<'gcc>>,
+        count: u64,
+        dest: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
         let start = dest.project_index(self, zero).llval;
@@ -870,7 +1052,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let next_bb = self.append_sibling_block("repeat_loop_next");
 
         let ptr_type = start.get_type();
-        let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
+        let current = self.llbb().get_function().new_local(self.location, ptr_type, "loop_var");
         let current_val = current.to_rvalue();
         self.assign(current, start);
 
@@ -884,8 +1066,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
         cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
 
-        let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
-        self.llbb().add_assignment(None, current, next);
+        let next = self.inbounds_gep(
+            self.backend_type(cg_elem.layout),
+            current.to_rvalue(),
+            &[self.const_usize(1)],
+        );
+        self.llbb().add_assignment(self.location, current, next);
         self.br(header_bb);
 
         self.switch_to_block(next_bb);
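
The blocks wired up in write_operand_repeatedly form a standard counted loop. Its shape in plain Rust, with comments naming the corresponding blocks:

    fn write_repeatedly<T: Copy>(dest: &mut [T], elem: T) {
        let mut i = 0;            // loop_var, initialized to start
        while i != dest.len() {   // repeat_loop_header: compare against end
            dest[i] = elem;       // repeat_loop_body: store one element
            i += 1;               // the inbounds_gep by one element
        }                         // fall through to repeat_loop_next
    }
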
@@ -903,75 +1089,100 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.store_with_flags(val, ptr, align, MemFlags::empty())
     }
 
-    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> {
+    fn store_with_flags(
+        &mut self,
+        val: RValue<'gcc>,
+        ptr: RValue<'gcc>,
+        align: Align,
+        _flags: MemFlags,
+    ) -> RValue<'gcc> {
         let ptr = self.check_store(val, ptr);
-        let destination = ptr.dereference(None);
+        let destination = ptr.dereference(self.location);
         // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
         // the pointer to a type that carries the proper alignment.
         let destination_type = destination.to_rvalue().get_type().unqualified();
         let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
-        let aligned_destination = self.cx.context.new_bitcast(None, ptr, aligned_type);
-        let aligned_destination = aligned_destination.dereference(None);
-        self.llbb().add_assignment(None, aligned_destination, val);
+        let aligned_destination = self.cx.context.new_bitcast(self.location, ptr, aligned_type);
+        let aligned_destination = aligned_destination.dereference(self.location);
+        self.llbb().add_assignment(self.location, aligned_destination, val);
         // TODO(antoyo): handle align and flags.
         // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
         self.cx.context.new_rvalue_zero(self.type_i32())
     }
 
-    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
+    fn atomic_store(
+        &mut self,
+        value: RValue<'gcc>,
+        ptr: RValue<'gcc>,
+        order: AtomicOrdering,
+        size: Size,
+    ) {
         // TODO(antoyo): handle alignment.
-        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
+        let atomic_store =
+            self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
         let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
-        let volatile_const_void_ptr_type = self.context.new_type::<()>()
-            .make_volatile()
-            .make_pointer();
-        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+        let volatile_const_void_ptr_type =
+            self.context.new_type::<()>().make_volatile().make_pointer();
+        let ptr = self.context.new_cast(self.location, ptr, volatile_const_void_ptr_type);
 
         // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
         // the following cast is required to avoid this error:
         // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int  __attribute__((aligned(4))))
         let int_type = atomic_store.get_param(1).to_rvalue().get_type();
-        let value = self.context.new_cast(None, value, int_type);
-        self.llbb()
-            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
+        let value = self.context.new_cast(self.location, value, int_type);
+        self.llbb().add_eval(
+            self.location,
+            self.context.new_call(self.location, atomic_store, &[ptr, value, ordering]),
+        );
     }
 
-    fn gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+    fn gep(
+        &mut self,
+        typ: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        indices: &[RValue<'gcc>],
+    ) -> RValue<'gcc> {
         // NOTE: due to opaque pointers now being used, we need to cast here.
-        let ptr = self.context.new_cast(None, ptr, typ.make_pointer());
+        let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
         let ptr_type = ptr.get_type();
         let mut pointee_type = ptr.get_type();
         // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is
         // always considered in bounds in GCC (TODO(antoyo): to be verified).
         // So, we have to cast to a number.
-        let mut result = self.context.new_bitcast(None, ptr, self.sizet_type);
+        let mut result = self.context.new_bitcast(self.location, ptr, self.sizet_type);
         // FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would
         // require dereferencing the pointer.
         for index in indices {
             pointee_type = pointee_type.get_pointee().expect("pointee type");
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             let pointee_size = {
                 let size = self.cx.context.new_sizeof(pointee_type);
-                self.context.new_cast(None, size, index.get_type())
+                self.context.new_cast(self.location, size, index.get_type())
             };
-            #[cfg(not(feature="master"))]
-            let pointee_size = self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
+            #[cfg(not(feature = "master"))]
+            let pointee_size =
+                self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
             result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type);
         }
-        self.context.new_bitcast(None, result, ptr_type)
+        self.context.new_bitcast(self.location, result, ptr_type)
     }
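
Since GCC treats array indexing as always in bounds, `gep` falls back to plain
integer arithmetic on the address: each index is scaled by the size of the current
pointee and accumulated. A scalar sketch of that computation (hypothetical helper,
one pointee size per index level):

    // Address computation performed by the loop above, on plain integers.
    fn gep_offset(base: usize, indices: &[usize], pointee_sizes: &[usize]) -> usize {
        indices
            .iter()
            .zip(pointee_sizes)
            .fold(base, |addr, (index, size)| addr + index * size)
    }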
 
-    fn inbounds_gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+    fn inbounds_gep(
+        &mut self,
+        typ: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        indices: &[RValue<'gcc>],
+    ) -> RValue<'gcc> {
         // NOTE: due to opaque pointers now being used, we need to cast here.
-        let ptr = self.context.new_cast(None, ptr, typ.make_pointer());
+        let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
         // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
         let mut indices = indices.into_iter();
         let index = indices.next().expect("first index in inbounds_gep");
-        let mut result = self.context.new_array_access(None, ptr, *index);
+        let mut result = self.context.new_array_access(self.location, ptr, *index);
         for index in indices {
-            result = self.context.new_array_access(None, result, *index);
+            result = self.context.new_array_access(self.location, result, *index);
         }
-        result.get_address(None)
+        result.get_address(self.location)
     }
 
     /* Casts */
@@ -986,32 +1197,32 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             // TODO(antoyo): nothing to do as it is only for LLVM?
             return value;
         }
-        self.context.new_cast(None, value, dest_ty)
+        self.context.new_cast(self.location, value, dest_ty)
     }
 
     fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_float_to_uint_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_float_to_uint_cast(value, dest_ty))
     }
 
     fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_float_to_int_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_float_to_int_cast(value, dest_ty))
     }
 
     fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_uint_to_float_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_uint_to_float_cast(value, dest_ty))
     }
 
     fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_int_to_float_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_int_to_float_cast(value, dest_ty))
     }
 
     fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): make sure it truncates.
-        self.context.new_cast(None, value, dest_ty)
+        set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty))
     }
 
     fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(None, value, dest_ty)
+        set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty))
     }
 
     fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@@ -1028,7 +1239,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cx.const_bitcast(value, dest_ty)
     }
 
-    fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
+    fn intcast(
+        &mut self,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+        _is_signed: bool,
+    ) -> RValue<'gcc> {
         // NOTE: is_signed is for value, not dest_typ.
         self.gcc_int_cast(value, dest_typ)
     }
@@ -1039,13 +1255,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             (false, true) => {
                 // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
                 // a pointer, which is not supported by gccjit.
-                return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
-            },
+                self.cx.context.new_cast(
+                    self.location,
+                    self.inttoptr(value, val_type.make_pointer()),
+                    dest_ty,
+                )
+            }
             (false, false) => {
                 // When they are not pointers, we want a transmute (or reinterpret_cast).
                 self.bitcast(value, dest_ty)
-            },
-            (true, true) => self.cx.context.new_cast(None, value, dest_ty),
+            }
+            (true, true) => self.cx.context.new_cast(self.location, value, dest_ty),
             (true, false) => unimplemented!(),
         }
     }
@@ -1056,11 +1276,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
-        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+        self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
     }
 
     /* Miscellaneous instructions */
-    fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+    fn memcpy(
+        &mut self,
+        dst: RValue<'gcc>,
+        _dst_align: Align,
+        src: RValue<'gcc>,
+        _src_align: Align,
+        size: RValue<'gcc>,
+        flags: MemFlags,
+    ) {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_size_t(), false);
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
@@ -1068,10 +1296,21 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
         let memcpy = self.context.get_builtin_function("memcpy");
         // TODO(antoyo): handle aligns and is_volatile.
-        self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memcpy, &[dst, src, size]),
+        );
     }
 
-    fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+    fn memmove(
+        &mut self,
+        dst: RValue<'gcc>,
+        dst_align: Align,
+        src: RValue<'gcc>,
+        src_align: Align,
+        size: RValue<'gcc>,
+        flags: MemFlags,
+    ) {
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memmove.
             let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align);
@@ -1086,35 +1325,53 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
         let memmove = self.context.get_builtin_function("memmove");
         // TODO(antoyo): handle is_volatile.
-        self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memmove, &[dst, src, size]),
+        );
     }
 
-    fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
+    fn memset(
+        &mut self,
+        ptr: RValue<'gcc>,
+        fill_byte: RValue<'gcc>,
+        size: RValue<'gcc>,
+        _align: Align,
+        flags: MemFlags,
+    ) {
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
         let ptr = self.pointercast(ptr, self.type_i8p());
         let memset = self.context.get_builtin_function("memset");
         // TODO(antoyo): handle align and is_volatile.
-        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
+        let fill_byte = self.context.new_cast(self.location, fill_byte, self.i32_type);
         let size = self.intcast(size, self.type_size_t(), false);
-        self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memset, &[ptr, fill_byte, size]),
+        );
     }
 
-    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
+    fn select(
+        &mut self,
+        cond: RValue<'gcc>,
+        then_val: RValue<'gcc>,
+        mut else_val: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         let func = self.current_func();
-        let variable = func.new_local(None, then_val.get_type(), "selectVar");
+        let variable = func.new_local(self.location, then_val.get_type(), "selectVar");
         let then_block = func.new_block("then");
         let else_block = func.new_block("else");
         let after_block = func.new_block("after");
-        self.llbb().end_with_conditional(None, cond, then_block, else_block);
+        self.llbb().end_with_conditional(self.location, cond, then_block, else_block);
 
-        then_block.add_assignment(None, variable, then_val);
-        then_block.end_with_jump(None, after_block);
+        then_block.add_assignment(self.location, variable, then_val);
+        then_block.end_with_jump(self.location, after_block);
 
         if !then_val.get_type().is_compatible_with(else_val.get_type()) {
-            else_val = self.context.new_cast(None, else_val, then_val.get_type());
+            else_val = self.context.new_cast(self.location, else_val, then_val.get_type());
         }
-        else_block.add_assignment(None, variable, else_val);
-        else_block.end_with_jump(None, after_block);
+        else_block.add_assignment(self.location, variable, else_val);
+        else_block.end_with_jump(self.location, after_block);
 
         // NOTE: since jumps were added in a place rustc does not expect, the current block in the
         // state needs to be updated.
@@ -1128,19 +1385,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
-        self.context.new_vector_access(None, vec, idx).to_rvalue()
+        self.context.new_vector_access(self.location, vec, idx).to_rvalue()
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
-        let vector_type = vec.get_type().unqualified().dyncast_vector().expect("Called extract_element on a non-vector type");
+        let vector_type = vec
+            .get_type()
+            .unqualified()
+            .dyncast_vector()
+            .expect("Called extract_element on a non-vector type");
         let element_type = vector_type.get_element_type();
         let vec_num_units = vector_type.get_num_units();
-        let array_type = self.context.new_array_type(None, element_type, vec_num_units as u64);
-        let array = self.context.new_bitcast(None, vec, array_type).to_rvalue();
-        self.context.new_array_access(None, array, idx).to_rvalue()
+        let array_type =
+            self.context.new_array_type(self.location, element_type, vec_num_units as u64);
+        let array = self.context.new_bitcast(self.location, vec, array_type).to_rvalue();
+        self.context.new_array_access(self.location, array, idx).to_rvalue()
     }
 
     fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
@@ -1153,82 +1415,85 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let value_type = aggregate_value.get_type();
 
         if value_type.dyncast_array().is_some() {
-            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-            let element = self.context.new_array_access(None, aggregate_value, index);
-            element.get_address(None)
-        }
-        else if value_type.dyncast_vector().is_some() {
+            let index = self
+                .context
+                .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(self.location, aggregate_value, index);
+            element.get_address(self.location)
+        } else if value_type.dyncast_vector().is_some() {
             panic!();
-        }
-        else if let Some(pointer_type) = value_type.get_pointee() {
+        } else if let Some(pointer_type) = value_type.get_pointee() {
             if let Some(struct_type) = pointer_type.is_struct() {
                 // NOTE: hack to work around a limitation of the rustc API: see comment on
                 // CodegenCx.structs_as_pointer
-                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
-            }
-            else {
+                aggregate_value
+                    .dereference_field(self.location, struct_type.get_field(idx as i32))
+                    .to_rvalue()
+            } else {
                 panic!("Unexpected type {:?}", value_type);
             }
-        }
-        else if let Some(struct_type) = value_type.is_struct() {
-            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
-        }
-        else {
+        } else if let Some(struct_type) = value_type.is_struct() {
+            aggregate_value
+                .access_field(self.location, struct_type.get_field(idx as i32))
+                .to_rvalue()
+        } else {
             panic!("Unexpected type {:?}", value_type);
         }
     }
 
-    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+    fn insert_value(
+        &mut self,
+        aggregate_value: RValue<'gcc>,
+        value: RValue<'gcc>,
+        idx: u64,
+    ) -> RValue<'gcc> {
         // FIXME(antoyo): it would be better if the API only called this on struct, not on arrays.
         assert_eq!(idx as usize as u64, idx);
         let value_type = aggregate_value.get_type();
 
-        let lvalue =
-            if value_type.dyncast_array().is_some() {
-                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-                self.context.new_array_access(None, aggregate_value, index)
-            }
-            else if value_type.dyncast_vector().is_some() {
-                panic!();
-            }
-            else if let Some(pointer_type) = value_type.get_pointee() {
-                if let Some(struct_type) = pointer_type.is_struct() {
-                    // NOTE: hack to workaround a limitation of the rustc API: see comment on
-                    // CodegenCx.structs_as_pointer
-                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
-                }
-                else {
-                    panic!("Unexpected type {:?}", value_type);
-                }
-            }
-            else {
+        let lvalue = if value_type.dyncast_array().is_some() {
+            let index = self
+                .context
+                .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            self.context.new_array_access(self.location, aggregate_value, index)
+        } else if value_type.dyncast_vector().is_some() {
+            panic!();
+        } else if let Some(pointer_type) = value_type.get_pointee() {
+            if let Some(struct_type) = pointer_type.is_struct() {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+                // CodegenCx.structs_as_pointer
+                aggregate_value.dereference_field(self.location, struct_type.get_field(idx as i32))
+            } else {
                 panic!("Unexpected type {:?}", value_type);
-            };
+            }
+        } else {
+            panic!("Unexpected type {:?}", value_type);
+        };
 
         let lvalue_type = lvalue.to_rvalue().get_type();
         let value =
             // NOTE: sometimes, rustc will create a value with the wrong type.
             if lvalue_type != value.get_type() {
-                self.context.new_cast(None, value, lvalue_type)
+                self.context.new_cast(self.location, value, lvalue_type)
             }
             else {
                 value
             };
 
-        self.llbb().add_assignment(None, lvalue, value);
+        self.llbb().add_assignment(self.location, lvalue, value);
 
         aggregate_value
     }
 
     fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         {
             let personality = self.rvalue_as_function(_personality);
             self.current_func().set_personality_function(personality);
         }
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     fn cleanup_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
         self.set_personality_fn(pers_fn);
 
@@ -1236,23 +1501,27 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         // generate a try/finally instead of a try/catch for this block.
         self.cleanup_blocks.borrow_mut().insert(self.block);
 
-        let eh_pointer_builtin = self.cx.context.get_target_builtin_function("__builtin_eh_pointer");
+        let eh_pointer_builtin =
+            self.cx.context.get_target_builtin_function("__builtin_eh_pointer");
         let zero = self.cx.context.new_rvalue_zero(self.int_type);
-        let ptr = self.cx.context.new_call(None, eh_pointer_builtin, &[zero]);
+        let ptr = self.cx.context.new_call(self.location, eh_pointer_builtin, &[zero]);
 
         let value1_type = self.u8_type.make_pointer();
-        let ptr = self.cx.context.new_cast(None, ptr, value1_type);
+        let ptr = self.cx.context.new_cast(self.location, ptr, value1_type);
         let value1 = ptr;
         let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?).
 
         (value1, value2)
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
-        let value1 = self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0")
-                .to_rvalue();
-        let value2 = self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue();
+        let value1 = self
+            .current_func()
+            .new_local(self.location, self.u8_type.make_pointer(), "landing_pad0")
+            .to_rvalue();
+        let value2 =
+            self.current_func().new_local(self.location, self.i32_type, "landing_pad1").to_rvalue();
         (value1, value2)
     }
 
@@ -1261,16 +1530,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cleanup_landing_pad(pers_fn)
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
         let exn_type = exn0.get_type();
-        let exn = self.context.new_cast(None, exn0, exn_type);
+        let exn = self.context.new_cast(self.location, exn0, exn_type);
         let unwind_resume = self.context.get_target_builtin_function("__builtin_unwind_resume");
-        self.llbb().add_eval(None, self.context.new_call(None, unwind_resume, &[exn]));
+        self.llbb()
+            .add_eval(self.location, self.context.new_call(self.location, unwind_resume, &[exn]));
         self.unreachable();
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
         self.unreachable();
     }
@@ -1297,68 +1567,82 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     // Atomic Operations
-    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> (RValue<'gcc>, RValue<'gcc>) {
+    fn atomic_cmpxchg(
+        &mut self,
+        dst: RValue<'gcc>,
+        cmp: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
+        weak: bool,
+    ) -> (RValue<'gcc>, RValue<'gcc>) {
         let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
         self.llbb().add_assignment(None, expected, cmp);
         // NOTE: gcc doesn't support a failure memory model that is stronger than the success
         // memory model.
-        let order =
-            if failure_order as i32 > order as i32 {
-                failure_order
-            }
-            else {
-                order
-            };
+        let order = if failure_order as i32 > order as i32 { failure_order } else { order };
         let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
 
         // NOTE: since success contains the call to the intrinsic, it must be added to the basic block before
         // expected so that we store expected after the call.
-        let success_var = self.current_func().new_local(None, self.bool_type, "success");
-        self.llbb().add_assignment(None, success_var, success);
+        let success_var = self.current_func().new_local(self.location, self.bool_type, "success");
+        self.llbb().add_assignment(self.location, success_var, success);
 
         (expected.to_rvalue(), success_var.to_rvalue())
     }
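
The pair returned here mirrors the GCC builtin's contract: `expected` ends up
holding the value actually observed in `dst`, and the flag says whether the exchange
took place. In std terms (fixed orderings are illustrative; the real code clamps the
failure ordering to be no stronger than the success ordering, as noted above):

    use std::sync::atomic::{AtomicU32, Ordering};

    // Shape of the (observed_value, success) pair produced by atomic_cmpxchg.
    fn cmpxchg(dst: &AtomicU32, cmp: u32, src: u32) -> (u32, bool) {
        match dst.compare_exchange(cmp, src, Ordering::SeqCst, Ordering::SeqCst) {
            Ok(old) => (old, true),   // dst held `cmp` and now holds `src`
            Err(old) => (old, false), // dst held `old` != `cmp`; nothing written
        }
    }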
 
-    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+    fn atomic_rmw(
+        &mut self,
+        op: AtomicRmwBinOp,
+        dst: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+    ) -> RValue<'gcc> {
         let size = src.get_type().get_size();
-        let name =
-            match op {
-                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
-                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
-                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
-                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
-                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
-                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
-                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
-                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
-                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
-                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
-                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
-            };
-
+        let name = match op {
+            AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+            AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+            AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+            AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+            AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+            AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+            AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+            AtomicRmwBinOp::AtomicMax => {
+                return self.atomic_extremum(ExtremumOperation::Max, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicMin => {
+                return self.atomic_extremum(ExtremumOperation::Min, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicUMax => {
+                return self.atomic_extremum(ExtremumOperation::Max, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicUMin => {
+                return self.atomic_extremum(ExtremumOperation::Min, dst, src, order);
+            }
+        };
 
         let atomic_function = self.context.get_builtin_function(name);
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 
         let void_ptr_type = self.context.new_type::<*mut ()>();
         let volatile_void_ptr_type = void_ptr_type.make_volatile();
-        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
         // FIXME(antoyo): not sure why, but we have the wrong type here.
         let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
-        let src = self.context.new_cast(None, src, new_src_type);
-        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
-        self.context.new_cast(None, res, src.get_type())
+        let src = self.context.new_cast(self.location, src, new_src_type);
+        let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]);
+        self.context.new_cast(self.location, res, src.get_type())
     }
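
Each `AtomicRmwBinOp` maps onto a size-suffixed `__atomic_fetch_*` builtin returning
the previous value, except the min/max variants, which are open-coded via
`atomic_extremum`. In std terms (illustrative helper with a fixed ordering):

    use std::sync::atomic::{AtomicU32, Ordering};

    // `__atomic_fetch_add_4(dst, src, order)` returns the value before the add,
    // matching `fetch_add`:
    fn rmw_add(dst: &AtomicU32, src: u32) -> u32 {
        dst.fetch_add(src, Ordering::SeqCst)
    }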
 
     fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
-        let name =
-            match scope {
-                SynchronizationScope::SingleThread => "__atomic_signal_fence",
-                SynchronizationScope::CrossThread => "__atomic_thread_fence",
-            };
+        let name = match scope {
+            SynchronizationScope::SingleThread => "__atomic_signal_fence",
+            SynchronizationScope::CrossThread => "__atomic_thread_fence",
+        };
         let thread_fence = self.context.get_builtin_function(name);
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
-        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+        self.llbb()
+            .add_eval(self.location, self.context.new_call(self.location, thread_fence, &[order]));
     }
 
     fn set_invariant_load(&mut self, load: RValue<'gcc>) {
@@ -1388,8 +1672,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let gcc_func = unsafe { std::mem::transmute(func) };
         let call = if self.functions.borrow().values().any(|value| *value == gcc_func) {
             self.function_call(func, args, funclet)
-        }
-        else {
+        } else {
             // If it's not a function that was defined, it's a function pointer.
             self.function_ptr_call(typ, func, args, funclet)
         };
@@ -1422,8 +1705,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
         if self.cx().val_ty(val) == self.cx().type_i1() {
             self.zext(val, self.cx().type_i8())
-        }
-        else {
+        } else {
             val
         }
     }
@@ -1443,13 +1725,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.fptoint_sat(true, val, dest_ty)
     }
 
-    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
+    fn instrprof_increment(
+        &mut self,
+        _fn_name: RValue<'gcc>,
+        _hash: RValue<'gcc>,
+        _num_counters: RValue<'gcc>,
+        _index: RValue<'gcc>,
+    ) {
         unimplemented!();
     }
 }
 
 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
-    fn fptoint_sat(&mut self, signed: bool, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+    fn fptoint_sat(
+        &mut self,
+        signed: bool,
+        val: RValue<'gcc>,
+        dest_ty: Type<'gcc>,
+    ) -> RValue<'gcc> {
         let src_ty = self.cx.val_ty(val);
         let (float_ty, int_ty) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
             assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
@@ -1486,10 +1779,18 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
         let int_max = |signed: bool, int_width: u64| -> u128 {
             let shift_amount = 128 - int_width;
-            if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+            if signed {
+                i128::MAX as u128 >> shift_amount
+            } else {
+                u128::MAX >> shift_amount
+            }
         };
         let int_min = |signed: bool, int_width: u64| -> i128 {
-            if signed { i128::MIN >> (128 - int_width) } else { 0 }
+            if signed {
+                i128::MIN >> (128 - int_width)
+            } else {
+                0
+            }
         };
 
         let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
@@ -1573,7 +1874,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let zero = maybe_splat(self, zero);
 
         // Step 1 ...
-        let fptosui_result = if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
+        let fptosui_result =
+            if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
         let less_or_nan = self.fcmp(RealPredicate::RealULT, val, f_min);
         let greater = self.fcmp(RealPredicate::RealOGT, val, f_max);
 
@@ -1609,8 +1911,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         }
     }
 
-    #[cfg(feature="master")]
-    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+    #[cfg(feature = "master")]
+    pub fn shuffle_vector(
+        &mut self,
+        v1: RValue<'gcc>,
+        v2: RValue<'gcc>,
+        mask: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         let struct_type = mask.get_type().is_struct().expect("mask should be of struct type");
 
         // TODO(antoyo): use a recursive unqualified() here.
@@ -1620,21 +1927,23 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         let mask_num_units = struct_type.get_field_count();
         let mut vector_elements = vec![];
-        let mask_element_type =
-            if element_type.is_integral() {
-                element_type
+        let mask_element_type = if element_type.is_integral() {
+            element_type
+        } else {
+            #[cfg(feature = "master")]
+            {
+                self.cx.type_ix(element_type.get_size() as u64 * 8)
             }
-            else {
-                #[cfg(feature="master")]
-                {
-                    self.cx.type_ix(element_type.get_size() as u64 * 8)
-                }
-                #[cfg(not(feature="master"))]
-                self.int_type
-            };
+            #[cfg(not(feature = "master"))]
+            self.int_type
+        };
         for i in 0..mask_num_units {
             let field = struct_type.get_field(i as i32);
-            vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type));
+            vector_elements.push(self.context.new_cast(
+                self.location,
+                mask.access_field(self.location, field).to_rvalue(),
+                mask_element_type,
+            ));
         }
 
         // NOTE: the mask needs to be the same length as the input vectors, so add the missing
@@ -1644,53 +1953,84 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         }
 
         let result_type = self.context.new_vector_type(element_type, mask_num_units as u64);
-        let (v1, v2) =
-            if vec_num_units < mask_num_units {
-                // NOTE: the mask needs to be the same length as the input vectors, so join the 2
-                // vectors and create a dummy second vector.
-                let mut elements = vec![];
-                for i in 0..vec_num_units {
-                    elements.push(self.context.new_vector_access(None, v1, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
-                }
-                for i in 0..(mask_num_units - vec_num_units) {
-                    elements.push(self.context.new_vector_access(None, v2, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
-                }
-                let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements);
-                let zero = self.context.new_rvalue_zero(element_type);
-                let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]);
-                (v1, v2)
+        let (v1, v2) = if vec_num_units < mask_num_units {
+            // NOTE: the mask needs to be the same length as the input vectors, so join the 2
+            // vectors and create a dummy second vector.
+            let mut elements = vec![];
+            for i in 0..vec_num_units {
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            v1,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
             }
-            else {
-                (v1, v2)
-            };
+            for i in 0..(mask_num_units - vec_num_units) {
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            v2,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
+            }
+            let v1 = self.context.new_rvalue_from_vector(self.location, result_type, &elements);
+            let zero = self.context.new_rvalue_zero(element_type);
+            let v2 = self.context.new_rvalue_from_vector(
+                self.location,
+                result_type,
+                &vec![zero; mask_num_units],
+            );
+            (v1, v2)
+        } else {
+            (v1, v2)
+        };
 
         let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units);
         let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64);
-        let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
-        let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask);
+        let mask = self.context.new_rvalue_from_vector(self.location, mask_type, &vector_elements);
+        let result = self.context.new_rvalue_vector_perm(self.location, v1, v2, mask);
 
         if vec_num_units != mask_num_units {
             // NOTE: if padding was added, only select the number of elements of the masks to
             // remove that padding in the result.
             let mut elements = vec![];
             for i in 0..mask_num_units {
-                elements.push(self.context.new_vector_access(None, result, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            result,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
             }
-            self.context.new_rvalue_from_vector(None, result_type, &elements)
-        }
-        else {
+            self.context.new_rvalue_from_vector(self.location, result_type, &elements)
+        } else {
             result
         }
     }
 
-    #[cfg(not(feature="master"))]
-    pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> {
+    #[cfg(not(feature = "master"))]
+    pub fn shuffle_vector(
+        &mut self,
+        _v1: RValue<'gcc>,
+        _v2: RValue<'gcc>,
+        _mask: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
-    where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+    where
+        F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>,
     {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_type = vector_type.get_element_type();
@@ -1704,130 +2044,178 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let mut shift = 1;
         let mut res = src;
         while shift < element_count {
-            let vector_elements: Vec<_> =
-                vector_elements.iter()
-                    .map(|i| self.context.new_rvalue_from_int(mask_element_type, ((i + shift) % element_count) as i32))
-                    .collect();
-            let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
-            let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask);
+            let vector_elements: Vec<_> = vector_elements
+                .iter()
+                .map(|i| {
+                    self.context.new_rvalue_from_int(
+                        mask_element_type,
+                        ((i + shift) % element_count) as i32,
+                    )
+                })
+                .collect();
+            let mask =
+                self.context.new_rvalue_from_vector(self.location, mask_type, &vector_elements);
+            let shifted = self.context.new_rvalue_vector_perm(self.location, res, res, mask);
             shift *= 2;
             res = op(res, shifted, &self.context);
         }
-        self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type))
+        self.context
+            .new_vector_access(self.location, res, self.context.new_rvalue_zero(self.int_type))
             .to_rvalue()
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce<F>(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc>
-    where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+    where
+        F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>,
     {
         unimplemented!();
     }
 
     pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
-        self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b))
+        let loc = self.location.clone();
+        self.vector_reduce(src, |a, b, context| context.new_binary_op(loc, op, a.get_type(), a, b))
     }
 
-    pub fn vector_reduce_fadd_reassoc(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_reduce_fadd_reassoc(
+        &mut self,
+        _acc: RValue<'gcc>,
+        _src: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fadd(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        (0..element_count).into_iter()
-            .map(|i| self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
-                .to_rvalue())
+        (0..element_count)
+            .into_iter()
+            .map(|i| {
+                self.context
+                    .new_vector_access(
+                        self.location,
+                        src,
+                        self.context.new_rvalue_from_int(self.int_type, i as _),
+                    )
+                    .to_rvalue()
+            })
             .fold(acc, |x, i| x + i)
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fadd(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    pub fn vector_reduce_fmul_reassoc(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_reduce_fmul_reassoc(
+        &mut self,
+        _acc: RValue<'gcc>,
+        _src: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fmul(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        (0..element_count).into_iter()
-            .map(|i| self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
-                .to_rvalue())
+        (0..element_count)
+            .into_iter()
+            .map(|i| {
+                self.context
+                    .new_vector_access(
+                        self.location,
+                        src,
+                        self.context.new_rvalue_from_int(self.int_type, i as _),
+                    )
+                    .to_rvalue()
+            })
             .fold(acc, |x, i| x * i)
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fmul(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!()
     }
 
     // Inspired by Hacker's Delight min implementation.
     pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let loc = self.location.clone();
         self.vector_reduce(src, |a, b, context| {
-            let differences_or_zeros = difference_or_zero(a, b, context);
-            context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
+            let differences_or_zeros = difference_or_zero(loc, a, b, context);
+            context.new_binary_op(loc, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
         })
     }
 
     // Inspired by Hacker's Delight max implementation.
     pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let loc = self.location.clone();
         self.vector_reduce(src, |a, b, context| {
-            let differences_or_zeros = difference_or_zero(a, b, context);
-            context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
+            let differences_or_zeros = difference_or_zero(loc, a, b, context);
+            context.new_binary_op(loc, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
         })
     }
 
-    fn vector_extremum(&mut self, a: RValue<'gcc>, b: RValue<'gcc>, direction: ExtremumOperation) -> RValue<'gcc> {
+    fn vector_extremum(
+        &mut self,
+        a: RValue<'gcc>,
+        b: RValue<'gcc>,
+        direction: ExtremumOperation,
+    ) -> RValue<'gcc> {
         let vector_type = a.get_type();
 
         // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and
         // b get compared & spliced together, we get the numeric values instead of NaNs.
-        let b_nan_mask = self.context.new_comparison(None, ComparisonOp::NotEquals, b, b);
+        let b_nan_mask = self.context.new_comparison(self.location, ComparisonOp::NotEquals, b, b);
         let mask_type = b_nan_mask.get_type();
-        let b_nan_mask_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, mask_type, b_nan_mask);
-        let a_cast = self.context.new_bitcast(None, a, mask_type);
-        let b_cast = self.context.new_bitcast(None, b, mask_type);
+        let b_nan_mask_inverted =
+            self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, mask_type, b_nan_mask);
+        let a_cast = self.context.new_bitcast(self.location, a, mask_type);
+        let b_cast = self.context.new_bitcast(self.location, b, mask_type);
         let res = (b_nan_mask & a_cast) | (b_nan_mask_inverted & b_cast);
-        let b = self.context.new_bitcast(None, res, vector_type);
+        let b = self.context.new_bitcast(self.location, res, vector_type);
 
         // now do the actual comparison
         let comparison_op = match direction {
             ExtremumOperation::Min => ComparisonOp::LessThan,
             ExtremumOperation::Max => ComparisonOp::GreaterThan,
         };
-        let cmp = self.context.new_comparison(None, comparison_op, a, b);
-        let cmp_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, cmp.get_type(), cmp);
+        let cmp = self.context.new_comparison(self.location, comparison_op, a, b);
+        let cmp_inverted =
+            self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, cmp.get_type(), cmp);
         let res = (cmp & a_cast) | (cmp_inverted & res);
-        self.context.new_bitcast(None, res, vector_type)
+        self.context.new_bitcast(self.location, res, vector_type)
     }
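
Per lane, the two mask-and-blend passes above amount to: first substitute the lane
from `a` wherever `b` is NaN (a value never equal to itself), then keep whichever of
the two compares as the extremum. A scalar model of one lane (hypothetical helper,
Min shown):

    // One lane of vector_extremum with ExtremumOperation::Min.
    fn fmin_lane(a: f32, b: f32) -> f32 {
        let b = if b.is_nan() { a } else { b }; // the b_nan_mask blend
        if a < b { a } else { b }               // the cmp blend
    }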
 
     pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         self.vector_extremum(a, b, ExtremumOperation::Min)
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fmin(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue();
+        let mut acc = self
+            .context
+            .new_vector_access(self.location, src, self.context.new_rvalue_zero(self.int_type))
+            .to_rvalue();
         for i in 1..element_count {
-            let elem = self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
+            let elem = self
+                .context
+                .new_vector_access(
+                    self.location,
+                    src,
+                    self.context.new_rvalue_from_int(self.int_type, i as _),
+                )
                 .to_rvalue();
-            let cmp = self.context.new_comparison(None, ComparisonOp::LessThan, acc, elem);
+            let cmp = self.context.new_comparison(self.location, ComparisonOp::LessThan, acc, elem);
             acc = self.select(cmp, acc, elem);
         }
         acc
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fmin(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
@@ -1836,36 +2224,51 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.vector_extremum(a, b, ExtremumOperation::Max)
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fmax(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue();
+        let mut acc = self
+            .context
+            .new_vector_access(self.location, src, self.context.new_rvalue_zero(self.int_type))
+            .to_rvalue();
         for i in 1..element_count {
-            let elem = self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
+            let elem = self
+                .context
+                .new_vector_access(
+                    self.location,
+                    src,
+                    self.context.new_rvalue_from_int(self.int_type, i as _),
+                )
                 .to_rvalue();
-            let cmp = self.context.new_comparison(None, ComparisonOp::GreaterThan, acc, elem);
+            let cmp =
+                self.context.new_comparison(self.location, ComparisonOp::GreaterThan, acc, elem);
             acc = self.select(cmp, acc, elem);
         }
         acc
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fmax(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_select(
+        &mut self,
+        cond: RValue<'gcc>,
+        then_val: RValue<'gcc>,
+        else_val: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         // cond is a vector of integers, not of bools.
         let vector_type = cond.get_type().unqualified().dyncast_vector().expect("vector type");
         let num_units = vector_type.get_num_units();
         let element_type = vector_type.get_element_type();
 
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         let (cond, element_type) = {
             // TODO(antoyo): dyncast_vector should not require a call to unqualified.
-            let then_val_vector_type = then_val.get_type().unqualified().dyncast_vector().expect("vector type");
+            let then_val_vector_type =
+                then_val.get_type().unqualified().dyncast_vector().expect("vector type");
             let then_val_element_type = then_val_vector_type.get_element_type();
             let then_val_element_size = then_val_element_type.get_size();
 
@@ -1873,11 +2276,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // operation to work.
             if then_val_element_size != element_type.get_size() {
                 let new_element_type = self.type_ix(then_val_element_size as u64 * 8);
-                let new_vector_type = self.context.new_vector_type(new_element_type, num_units as u64);
-                let cond = self.context.convert_vector(None, cond, new_vector_type);
+                let new_vector_type =
+                    self.context.new_vector_type(new_element_type, num_units as u64);
+                let cond = self.context.convert_vector(self.location, cond, new_vector_type);
                 (cond, new_element_type)
-            }
-            else {
+            } else {
                 (cond, element_type)
             }
         };
@@ -1885,24 +2288,25 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let cond_type = cond.get_type();
 
         let zeros = vec![self.context.new_rvalue_zero(element_type); num_units];
-        let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros);
+        let zeros = self.context.new_rvalue_from_vector(self.location, cond_type, &zeros);
 
         let result_type = then_val.get_type();
 
-        let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros);
+        let masks =
+            self.context.new_comparison(self.location, ComparisonOp::NotEquals, cond, zeros);
         // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
         // the & operation work.
         let then_val = self.bitcast_if_needed(then_val, masks.get_type());
         let then_vals = masks & then_val;
 
         let minus_ones = vec![self.context.new_rvalue_from_int(element_type, -1); num_units];
-        let minus_ones = self.context.new_rvalue_from_vector(None, cond_type, &minus_ones);
+        let minus_ones = self.context.new_rvalue_from_vector(self.location, cond_type, &minus_ones);
         let inverted_masks = masks ^ minus_ones;
         // NOTE: sometimes, the type of else_val can be different than the type of then_val in
         // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND
         // operation to work.
         // TODO: remove bitcast now that vector types can be compared?
-        let else_val = self.context.new_bitcast(None, else_val, then_val.get_type());
+        let else_val = self.context.new_bitcast(self.location, else_val, then_val.get_type());
         let else_vals = inverted_masks & else_val;
 
         let res = then_vals | else_vals;
@@ -1910,26 +2314,26 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
 }
 
-fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> {
+fn difference_or_zero<'gcc>(
+    loc: Option<Location<'gcc>>,
+    a: RValue<'gcc>,
+    b: RValue<'gcc>,
+    context: &'gcc Context<'gcc>,
+) -> RValue<'gcc> {
     let difference = a - b;
-    let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a);
+    let masks = context.new_comparison(loc, ComparisonOp::GreaterThanEquals, b, a);
     // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
     // the & operation work.
     let a_type = a.get_type();
     let masks =
-        if masks.get_type() != a_type {
-            context.new_bitcast(None, masks, a_type)
-        }
-        else {
-            masks
-        };
+        if masks.get_type() != a_type { context.new_bitcast(loc, masks, a_type) } else { masks };
     difference & masks
 }
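
`difference_or_zero` yields `a - b` on lanes where `b >= a` and `0` elsewhere, which
is exactly what `vector_reduce_min` and `vector_reduce_max` above rely on: `b + dz`
recovers the minimum and `a - dz` the maximum (e.g. a = 3, b = 7 gives dz = -4, so
min = 7 + (-4) = 3 and max = 3 - (-4) = 7). A scalar check, ignoring wrap-around
(hypothetical helper):

    // min/max via the Hacker's Delight difference-or-zero trick.
    fn min_max_via_dz(a: i32, b: i32) -> (i32, i32) {
        let dz = if b >= a { a - b } else { 0 };
        (b + dz, a - dz) // (min, max)
    }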
 
 impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
     fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
         // Forward to the `get_static` method of `CodegenCx`
-        self.cx().get_static(def_id).get_address(None)
+        self.cx().get_static(def_id).get_address(self.location)
     }
 }
 
@@ -2009,15 +2413,14 @@ impl ToGccOrdering for AtomicOrdering {
     fn to_gcc(self) -> i32 {
         use MemOrdering::*;
 
-        let ordering =
-            match self {
-                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
-                AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
-                AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
-                AtomicOrdering::Release => __ATOMIC_RELEASE,
-                AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
-                AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
-            };
+        let ordering = match self {
+            AtomicOrdering::Unordered => __ATOMIC_RELAXED,
+            AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+            AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
+            AtomicOrdering::Release => __ATOMIC_RELEASE,
+            AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
+            AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+        };
         ordering as i32
     }
 }
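
`to_gcc` can end in a plain `as i32` cast because GCC's `__ATOMIC_*` memory orders are consecutive small integers with the same values as C++'s `std::memory_order`. A sketch of those values per the GCC `__atomic` builtins documentation (the enum below is illustrative, not gccjit's `MemOrdering` type):

```rust
// GCC memory-order constants, as documented for the __atomic builtins.
#[allow(dead_code)]
#[repr(i32)]
enum GccMemOrder {
    Relaxed = 0, // target of both Unordered and Relaxed above
    Consume = 1, // unused by the mapping above
    Acquire = 2,
    Release = 3,
    AcqRel = 4,
    SeqCst = 5,
}

fn main() {
    assert_eq!(GccMemOrder::Relaxed as i32, 0);
    assert_eq!(GccMemOrder::SeqCst as i32, 5);
}
```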
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs
index 9fc77627b1b..84f49b6856d 100644
--- a/compiler/rustc_codegen_gcc/src/callee.rs
+++ b/compiler/rustc_codegen_gcc/src/callee.rs
@@ -1,8 +1,8 @@
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::{FnAttribute, Visibility};
-use gccjit::{FunctionType, Function};
-use rustc_middle::ty::{self, Instance, TypeVisitableExt};
+use gccjit::{Function, FunctionType};
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
 
 use crate::attributes;
 use crate::context::CodegenCx;
@@ -28,145 +28,144 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>)
 
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
 
-    let func =
-        if let Some(_func) = cx.get_declared_value(&sym) {
-            // FIXME(antoyo): we never reach this because get_declared_value only returns global variables
-            // and here we try to get a function.
-            unreachable!();
-            /*
-            // Create a fn pointer with the new signature.
-            let ptrty = fn_abi.ptr_to_gcc_type(cx);
-
-            // This is subtle and surprising, but sometimes we have to bitcast
-            // the resulting fn pointer.  The reason has to do with external
-            // functions.  If you have two crates that both bind the same C
-            // library, they may not use precisely the same types: for
-            // example, they will probably each declare their own structs,
-            // which are distinct types from LLVM's point of view (nominal
-            // types).
-            //
-            // Now, if those two crates are linked into an application, and
-            // they contain inlined code, you can wind up with a situation
-            // where both of those functions wind up being loaded into this
-            // application simultaneously. In that case, the same function
-            // (from LLVM's point of view) requires two types. But of course
-            // LLVM won't allow one function to have two types.
-            //
-            // What we currently do, therefore, is declare the function with
-            // one of the two types (whichever happens to come first) and then
-            // bitcast as needed when the function is referenced to make sure
-            // it has the type we expect.
-            //
-            // This can occur on either a crate-local or crate-external
-            // reference. It also occurs when testing libcore and in some
-            // other weird situations. Annoying.
-            if cx.val_ty(func) != ptrty {
-                // TODO(antoyo): cast the pointer.
-                func
-            }
-            else {
-                func
-            }*/
+    let func = if let Some(_func) = cx.get_declared_value(&sym) {
+        // FIXME(antoyo): we never reach this because get_declared_value only returns global variables
+        // and here we try to get a function.
+        unreachable!();
+        /*
+        // Create a fn pointer with the new signature.
+        let ptrty = fn_abi.ptr_to_gcc_type(cx);
+
+        // This is subtle and surprising, but sometimes we have to bitcast
+        // the resulting fn pointer.  The reason has to do with external
+        // functions.  If you have two crates that both bind the same C
+        // library, they may not use precisely the same types: for
+        // example, they will probably each declare their own structs,
+        // which are distinct types from LLVM's point of view (nominal
+        // types).
+        //
+        // Now, if those two crates are linked into an application, and
+        // they contain inlined code, you can wind up with a situation
+        // where both of those functions wind up being loaded into this
+        // application simultaneously. In that case, the same function
+        // (from LLVM's point of view) requires two types. But of course
+        // LLVM won't allow one function to have two types.
+        //
+        // What we currently do, therefore, is declare the function with
+        // one of the two types (whichever happens to come first) and then
+        // bitcast as needed when the function is referenced to make sure
+        // it has the type we expect.
+        //
+        // This can occur on either a crate-local or crate-external
+        // reference. It also occurs when testing libcore and in some
+        // other weird situations. Annoying.
+        if cx.val_ty(func) != ptrty {
+            // TODO(antoyo): cast the pointer.
+            func
         }
         else {
-            cx.linkage.set(FunctionType::Extern);
-            let func = cx.declare_fn(&sym, &fn_abi);
-
-            attributes::from_fn_attrs(cx, func, instance);
-
-            let instance_def_id = instance.def_id();
-
-            // TODO(antoyo): set linkage and attributes.
-
-            // Apply an appropriate linkage/visibility value to our item that we
-            // just declared.
-            //
-            // This is sort of subtle. Inside our codegen unit we started off
-            // compilation by predefining all our own `MonoItem` instances. That
-            // is, everything we're codegenning ourselves is already defined. That
-            // means that anything we're actually codegenning in this codegen unit
-            // will have hit the above branch in `get_declared_value`. As a result,
-            // we're guaranteed here that we're declaring a symbol that won't get
-            // defined, or in other words we're referencing a value from another
-            // codegen unit or even another crate.
-            //
-            // So because this is a foreign value we blanket apply an external
-            // linkage directive because it's coming from a different object file.
-            // The visibility here is where it gets tricky. This symbol could be
-            // referencing some foreign crate or foreign library (an `extern`
-            // block) in which case we want to leave the default visibility. We may
-            // also, though, have multiple codegen units. It could be a
-            // monomorphization, in which case its expected visibility depends on
-            // whether we are sharing generics or not. The important thing here is
-            // that the visibility we apply to the declaration is the same one that
-            // has been applied to the definition (wherever that definition may be).
-            let is_generic = instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
-
-            if is_generic {
-                // This is a monomorphization. Its expected visibility depends
-                // on whether we are in share-generics mode.
-
-                if cx.tcx.sess.opts.share_generics() {
-                    // We are in share_generics mode.
-
-                    if let Some(instance_def_id) = instance_def_id.as_local() {
-                        // This is a definition from the current crate. If the
-                        // definition is unreachable for downstream crates or
-                        // the current crate does not re-export generics, the
-                        // definition of the instance will have been declared
-                        // as `hidden`.
-                        if cx.tcx.is_unreachable_local_definition(instance_def_id)
-                            || !cx.tcx.local_crate_exports_generics()
-                        {
-                            #[cfg(feature="master")]
-                            func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
-                        }
+            func
+        }*/
+    } else {
+        cx.linkage.set(FunctionType::Extern);
+        let func = cx.declare_fn(&sym, &fn_abi);
+
+        attributes::from_fn_attrs(cx, func, instance);
+
+        let instance_def_id = instance.def_id();
+
+        // TODO(antoyo): set linkage and attributes.
+
+        // Apply an appropriate linkage/visibility value to our item that we
+        // just declared.
+        //
+        // This is sort of subtle. Inside our codegen unit we started off
+        // compilation by predefining all our own `MonoItem` instances. That
+        // is, everything we're codegenning ourselves is already defined. That
+        // means that anything we're actually codegenning in this codegen unit
+        // will have hit the above branch in `get_declared_value`. As a result,
+        // we're guaranteed here that we're declaring a symbol that won't get
+        // defined, or in other words we're referencing a value from another
+        // codegen unit or even another crate.
+        //
+        // So because this is a foreign value we blanket apply an external
+        // linkage directive because it's coming from a different object file.
+        // The visibility here is where it gets tricky. This symbol could be
+        // referencing some foreign crate or foreign library (an `extern`
+        // block) in which case we want to leave the default visibility. We may
+        // also, though, have multiple codegen units. It could be a
+        // monomorphization, in which case its expected visibility depends on
+        // whether we are sharing generics or not. The important thing here is
+        // that the visibility we apply to the declaration is the same one that
+        // has been applied to the definition (wherever that definition may be).
+        let is_generic =
+            instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
+
+        if is_generic {
+            // This is a monomorphization. Its expected visibility depends
+            // on whether we are in share-generics mode.
+
+            if cx.tcx.sess.opts.share_generics() {
+                // We are in share_generics mode.
+
+                if let Some(instance_def_id) = instance_def_id.as_local() {
+                    // This is a definition from the current crate. If the
+                    // definition is unreachable for downstream crates or
+                    // the current crate does not re-export generics, the
+                    // definition of the instance will have been declared
+                    // as `hidden`.
+                    if cx.tcx.is_unreachable_local_definition(instance_def_id)
+                        || !cx.tcx.local_crate_exports_generics()
+                    {
+                        #[cfg(feature = "master")]
+                        func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
+                    }
+                } else {
+                    // This is a monomorphization of a generic function
+                    // defined in an upstream crate.
+                    if instance.upstream_monomorphization(tcx).is_some() {
+                        // This is instantiated in another crate. It cannot
+                        // be `hidden`.
                     } else {
-                        // This is a monomorphization of a generic function
-                        // defined in an upstream crate.
-                        if instance.upstream_monomorphization(tcx).is_some() {
-                            // This is instantiated in another crate. It cannot
-                            // be `hidden`.
-                        } else {
-                            // This is a local instantiation of an upstream definition.
-                            // If the current crate does not re-export it
-                            // (because it is a C library or an executable), it
-                            // will have been declared `hidden`.
-                            if !cx.tcx.local_crate_exports_generics() {
-                                #[cfg(feature="master")]
-                                func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
-                            }
+                        // This is a local instantiation of an upstream definition.
+                        // If the current crate does not re-export it
+                        // (because it is a C library or an executable), it
+                        // will have been declared `hidden`.
+                        if !cx.tcx.local_crate_exports_generics() {
+                            #[cfg(feature = "master")]
+                            func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
                         }
                     }
-                } else {
-                    // When not sharing generics, all instances are in the same
-                    // crate and have hidden visibility
-                    #[cfg(feature="master")]
-                    func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
                 }
             } else {
-                // This is a non-generic function
-                if cx.tcx.is_codegened_item(instance_def_id) {
-                    // This is a function that is instantiated in the local crate
-
-                    if instance_def_id.is_local() {
-                        // This is function that is defined in the local crate.
-                        // If it is not reachable, it is hidden.
-                        if !cx.tcx.is_reachable_non_generic(instance_def_id) {
-                            #[cfg(feature="master")]
-                            func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
-                        }
-                    } else {
-                        // This is a function from an upstream crate that has
-                        // been instantiated here. These are always hidden.
-                        #[cfg(feature="master")]
+                // When not sharing generics, all instances are in the same
+                // crate and have hidden visibility
+                #[cfg(feature = "master")]
+                func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
+            }
+        } else {
+            // This is a non-generic function
+            if cx.tcx.is_codegened_item(instance_def_id) {
+                // This is a function that is instantiated in the local crate
+
+                if instance_def_id.is_local() {
+                    // This is a function that is defined in the local crate.
+                    // If it is not reachable, it is hidden.
+                    if !cx.tcx.is_reachable_non_generic(instance_def_id) {
+                        #[cfg(feature = "master")]
                         func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
                     }
+                } else {
+                    // This is a function from an upstream crate that has
+                    // been instantiated here. These are always hidden.
+                    #[cfg(feature = "master")]
+                    func.add_attribute(FnAttribute::Visibility(Visibility::Hidden));
                 }
             }
+        }
 
-            func
-        };
+        func
+    };
 
     cx.function_instances.borrow_mut().insert(instance, func);
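
The visibility logic above is a decision tree over `tcx` queries. Condensed into a pure function for readability; every parameter name is a stand-in for the corresponding query in `get_fn`, and `true` means the `Hidden` visibility attribute gets applied:

```rust
// Condensed form of get_fn's visibility decisions; parameter names are
// illustrative stand-ins for the tcx/cx queries used above.
fn wants_hidden_visibility(
    is_generic: bool,
    share_generics: bool,
    is_local_definition: bool,
    unreachable_or_not_exporting_generics: bool,
    has_upstream_monomorphization: bool,
    local_crate_exports_generics: bool,
    is_codegened_item: bool,
    is_reachable_non_generic: bool,
) -> bool {
    if is_generic {
        if !share_generics {
            // Not sharing generics: every instance is local and hidden.
            return true;
        }
        if is_local_definition {
            // Hidden when unreachable downstream or generics are not re-exported.
            unreachable_or_not_exporting_generics
        } else if has_upstream_monomorphization {
            false // instantiated in another crate; cannot be hidden
        } else {
            !local_crate_exports_generics
        }
    } else if is_codegened_item {
        if is_local_definition {
            !is_reachable_non_generic
        } else {
            // Upstream function instantiated here: always hidden.
            true
        }
    } else {
        false
    }
}

fn main() {
    // A monomorphization compiled without shared generics is always hidden.
    assert!(wants_hidden_visibility(true, false, true, false, false, false, false, false));
}
```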
 
diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs
index c6edd52d1e4..d243d7088ad 100644
--- a/compiler/rustc_codegen_gcc/src/common.rs
+++ b/compiler/rustc_codegen_gcc/src/common.rs
@@ -1,14 +1,9 @@
 use gccjit::LValue;
-use gccjit::{RValue, Type, ToRValue};
-use rustc_codegen_ssa::traits::{
-    BaseTypeMethods,
-    ConstMethods,
-    MiscMethods,
-    StaticMethods,
-};
-use rustc_middle::mir::Mutability;
-use rustc_middle::ty::layout::{LayoutOf};
+use gccjit::{RValue, ToRValue, Type};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, MiscMethods, StaticMethods};
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
+use rustc_middle::mir::Mutability;
+use rustc_middle::ty::layout::LayoutOf;
 use rustc_target::abi::{self, HasDataLayout, Pointer};
 
 use crate::consts::const_alloc_to_gcc;
@@ -40,9 +35,7 @@ pub fn bytes_in_context<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, bytes: &[u8]) ->
     let byte_type = context.new_type::<u8>();
     let typ = context.new_array_type(None, byte_type, bytes.len() as u64);
     let elements: Vec<_> =
-        bytes.iter()
-        .map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32))
-        .collect();
+        bytes.iter().map(|&byte| context.new_rvalue_from_int(byte_type, byte as i32)).collect();
     context.new_array_constructor(None, typ, &elements)
 }
 
@@ -54,23 +47,20 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> {
         if type_is_pointer(typ) {
             self.context.new_null(typ)
-        }
-        else {
+        } else {
             self.const_int(typ, 0)
         }
     }
 
     fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> {
-        let local = self.current_func.borrow().expect("func")
-            .new_local(None, typ, "undefined");
+        let local = self.current_func.borrow().expect("func").new_local(None, typ, "undefined");
         if typ.is_struct().is_some() {
             // NOTE: hack to workaround a limitation of the rustc API: see comment on
             // CodegenCx.structs_as_pointer
             let pointer = local.get_address(None);
             self.structs_as_pointer.borrow_mut().insert(pointer);
             pointer
-        }
-        else {
+        } else {
             local.to_rvalue()
         }
     }
@@ -143,16 +133,15 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             .or_insert_with(|| (s.to_owned(), self.global_string(s)))
             .1;
         let len = s.len();
-        let cs = self.const_ptrcast(str_global.get_address(None),
+        let cs = self.const_ptrcast(
+            str_global.get_address(None),
             self.type_ptr_to(self.layout_of(self.tcx.types.str_).gcc_type(self)),
         );
         (cs, self.const_usize(len as u64))
     }
 
     fn const_struct(&self, values: &[RValue<'gcc>], packed: bool) -> RValue<'gcc> {
-        let fields: Vec<_> = values.iter()
-            .map(|value| value.get_type())
-            .collect();
+        let fields: Vec<_> = values.iter().map(|value| value.get_type()).collect();
         // TODO(antoyo): cache the type? It's anonymous, so probably not.
         let typ = self.type_struct(&fields, packed);
         let struct_type = typ.is_struct().expect("struct type");
@@ -178,9 +167,10 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
                 // FIXME(antoyo): there's some issues with using the u128 code that follows, so hard-code
                 // the paths for floating-point values.
                 if ty == self.float_type {
-                    return self.context.new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
-                }
-                else if ty == self.double_type {
+                    return self
+                        .context
+                        .new_rvalue_from_double(ty, f32::from_bits(data as u32) as f64);
+                } else if ty == self.double_type {
                     return self.context.new_rvalue_from_double(ty, f64::from_bits(data as u64));
                 }
 
@@ -192,8 +182,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
                     // FIXME(antoyo): fix bitcast to work in constant contexts.
                     // TODO(antoyo): perhaps only use bitcast for pointers?
                     self.context.new_cast(None, value, ty)
-                }
-                else {
+                } else {
                     // TODO(bjorn3): assert size is correct
                     self.const_bitcast(value, ty)
                 }
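
The float path above rebuilds the constant from the scalar's raw bits and hands it to libgccjit as a `double`; this is lossless for `f32` because every `f32` value is exactly representable as an `f64`. Standalone sketch of the bit round-trip:

```rust
// Rebuild an f32 constant from raw scalar bits and widen it to f64, as the
// hard-coded float path does; the widening round-trips exactly.
fn f32_const_from_bits(data: u128) -> f64 {
    f32::from_bits(data as u32) as f64
}

fn main() {
    let bits = 1.5f32.to_bits() as u128;
    assert_eq!(f32_const_from_bits(bits), 1.5f64);
    assert_eq!(f32_const_from_bits(bits) as f32, 1.5f32);
}
```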
@@ -201,42 +190,41 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             Scalar::Ptr(ptr, _size) => {
                 let (prov, offset) = ptr.into_parts(); // we know the `offset` is relative
                 let alloc_id = prov.alloc_id();
-                let base_addr =
-                    match self.tcx.global_alloc(alloc_id) {
-                        GlobalAlloc::Memory(alloc) => {
-                            let init = const_alloc_to_gcc(self, alloc);
-                            let alloc = alloc.inner();
-                            let value =
-                                match alloc.mutability {
-                                    Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
-                                    _ => self.static_addr_of(init, alloc.align, None),
-                                };
-                            if !self.sess().fewer_names() {
-                                // TODO(antoyo): set value name.
-                            }
-                            value
-                        },
-                        GlobalAlloc::Function(fn_instance) => {
-                            self.get_fn_addr(fn_instance)
-                        },
-                        GlobalAlloc::VTable(ty, trait_ref) => {
-                            let alloc = self.tcx.global_alloc(self.tcx.vtable_allocation((ty, trait_ref))).unwrap_memory();
-                            let init = const_alloc_to_gcc(self, alloc);
-                            self.static_addr_of(init, alloc.inner().align, None)
+                let base_addr = match self.tcx.global_alloc(alloc_id) {
+                    GlobalAlloc::Memory(alloc) => {
+                        let init = const_alloc_to_gcc(self, alloc);
+                        let alloc = alloc.inner();
+                        let value = match alloc.mutability {
+                            Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+                            _ => self.static_addr_of(init, alloc.align, None),
+                        };
+                        if !self.sess().fewer_names() {
+                            // TODO(antoyo): set value name.
                         }
-                        GlobalAlloc::Static(def_id) => {
-                            assert!(self.tcx.is_static(def_id));
-                            self.get_static(def_id).get_address(None)
-                        },
-                    };
+                        value
+                    }
+                    GlobalAlloc::Function(fn_instance) => self.get_fn_addr(fn_instance),
+                    GlobalAlloc::VTable(ty, trait_ref) => {
+                        let alloc = self
+                            .tcx
+                            .global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
+                            .unwrap_memory();
+                        let init = const_alloc_to_gcc(self, alloc);
+                        self.static_addr_of(init, alloc.inner().align, None)
+                    }
+                    GlobalAlloc::Static(def_id) => {
+                        assert!(self.tcx.is_static(def_id));
+                        self.get_static(def_id).get_address(None)
+                    }
+                };
                 let ptr_type = base_addr.get_type();
                 let base_addr = self.const_bitcast(base_addr, self.usize_type);
-                let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
+                let offset =
+                    self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
                 let ptr = self.const_bitcast(base_addr + offset, ptr_type);
                 if !matches!(layout.primitive(), Pointer(_)) {
                     self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
-                }
-                else {
+                } else {
                     self.const_bitcast(ptr, ty)
                 }
             }
@@ -261,7 +249,9 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     }
 
     fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value {
-        self.context.new_array_access(None, base_addr, self.const_usize(offset.bytes())).get_address(None)
+        self.context
+            .new_array_access(None, base_addr, self.const_usize(offset.bytes()))
+            .get_address(None)
     }
 }
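
Both the `Scalar::Ptr` arm and `const_ptr_byte_offset` above materialize a pointer constant as integer arithmetic: bitcast the base address to `usize`, add the byte offset, and bitcast back to the pointer type. A plain-Rust sketch of that arithmetic (no gccjit involved):

```rust
// Base-plus-offset pointer constant, mirroring the bitcast/add/bitcast
// sequence used for Scalar::Ptr relocations.
fn ptr_with_byte_offset(base: *const u8, offset_bytes: usize) -> *const u8 {
    (base as usize + offset_bytes) as *const u8
}

fn main() {
    let data = [10u8, 20, 30];
    let p = ptr_with_byte_offset(data.as_ptr(), 2);
    assert_eq!(unsafe { *p }, 30);
}
```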
 
@@ -284,35 +274,25 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
     fn to_signed(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
         if self.is_u8(cx) {
             cx.i8_type
-        }
-        else if self.is_u16(cx) {
+        } else if self.is_u16(cx) {
             cx.i16_type
-        }
-        else if self.is_u32(cx) {
+        } else if self.is_u32(cx) {
             cx.i32_type
-        }
-        else if self.is_u64(cx) {
+        } else if self.is_u64(cx) {
             cx.i64_type
-        }
-        else if self.is_u128(cx) {
+        } else if self.is_u128(cx) {
             cx.i128_type
-        }
-        else if self.is_uchar(cx) {
+        } else if self.is_uchar(cx) {
             cx.char_type
-        }
-        else if self.is_ushort(cx) {
+        } else if self.is_ushort(cx) {
             cx.short_type
-        }
-        else if self.is_uint(cx) {
+        } else if self.is_uint(cx) {
             cx.int_type
-        }
-        else if self.is_ulong(cx) {
+        } else if self.is_ulong(cx) {
             cx.long_type
-        }
-        else if self.is_ulonglong(cx) {
+        } else if self.is_ulonglong(cx) {
             cx.longlong_type
-        }
-        else {
+        } else {
             self.clone()
         }
     }
@@ -320,41 +300,31 @@ impl<'gcc, 'tcx> SignType<'gcc, 'tcx> for Type<'gcc> {
     fn to_unsigned(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
         if self.is_i8(cx) {
             cx.u8_type
-        }
-        else if self.is_i16(cx) {
+        } else if self.is_i16(cx) {
             cx.u16_type
-        }
-        else if self.is_i32(cx) {
+        } else if self.is_i32(cx) {
             cx.u32_type
-        }
-        else if self.is_i64(cx) {
+        } else if self.is_i64(cx) {
             cx.u64_type
-        }
-        else if self.is_i128(cx) {
+        } else if self.is_i128(cx) {
             cx.u128_type
-        }
-        else if self.is_char(cx) {
+        } else if self.is_char(cx) {
             cx.uchar_type
-        }
-        else if self.is_short(cx) {
+        } else if self.is_short(cx) {
             cx.ushort_type
-        }
-        else if self.is_int(cx) {
+        } else if self.is_int(cx) {
             cx.uint_type
-        }
-        else if self.is_long(cx) {
+        } else if self.is_long(cx) {
             cx.ulong_type
-        }
-        else if self.is_longlong(cx) {
+        } else if self.is_longlong(cx) {
             cx.ulonglong_type
-        }
-        else {
+        } else {
             self.clone()
         }
     }
 }
 
-pub trait TypeReflection<'gcc, 'tcx>  {
+pub trait TypeReflection<'gcc, 'tcx> {
     fn is_uchar(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
     fn is_ushort(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
     fn is_uint(&self, cx: &CodegenCx<'gcc, 'tcx>) -> bool;
diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs
index 70d8db02247..327c9bdada9 100644
--- a/compiler/rustc_codegen_gcc/src/consts.rs
+++ b/compiler/rustc_codegen_gcc/src/consts.rs
@@ -2,12 +2,14 @@
 use gccjit::{FnAttribute, VarAttribute, Visibility};
 use gccjit::{Function, GlobalKind, LValue, RValue, ToRValue};
 use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods, DerivedTypeMethods, StaticMethods};
-use rustc_middle::span_bug;
 use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
+use rustc_middle::mir::interpret::{
+    self, read_target_uint, ConstAllocation, ErrorHandled, Scalar as InterpScalar,
+};
 use rustc_middle::mir::mono::MonoItem;
-use rustc_middle::ty::{self, Instance, Ty};
+use rustc_middle::span_bug;
 use rustc_middle::ty::layout::LayoutOf;
-use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
+use rustc_middle::ty::{self, Instance, Ty};
 use rustc_span::def_id::DefId;
 use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange};
 
@@ -16,7 +18,11 @@ use crate::context::CodegenCx;
 use crate::errors::InvalidMinimumAlignment;
 use crate::type_of::LayoutGccExt;
 
-fn set_global_alignment<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, gv: LValue<'gcc>, mut align: Align) {
+fn set_global_alignment<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    gv: LValue<'gcc>,
+    mut align: Align,
+) {
     // The target may require greater alignment for globals than the type does.
     // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
     // which can force it to be smaller. Rust doesn't support this yet.
@@ -48,7 +54,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
         }
         let global_value = self.static_addr_of_mut(cv, align, kind);
         #[cfg(feature = "master")]
-        self.global_lvalues.borrow().get(&global_value)
+        self.global_lvalues
+            .borrow()
+            .get(&global_value)
             .expect("`static_addr_of_mut` did not add the global to `self.global_lvalues`")
             .global_set_readonly();
         self.const_globals.borrow_mut().insert(cv, global_value);
@@ -58,25 +66,20 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
     fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
         let attrs = self.tcx.codegen_fn_attrs(def_id);
 
-        let value =
-            match codegen_static_initializer(&self, def_id) {
-                Ok((value, _)) => value,
-                // Error has already been reported
-                Err(_) => return,
-            };
+        let value = match codegen_static_initializer(&self, def_id) {
+            Ok((value, _)) => value,
+            // Error has already been reported
+            Err(_) => return,
+        };
 
         let global = self.get_static(def_id);
 
         // boolean SSA values are i1, but they have to be stored in i8 slots,
         // otherwise some LLVM optimization passes don't work as expected
         let val_llty = self.val_ty(value);
-        let value =
-            if val_llty == self.type_i1() {
-                unimplemented!();
-            }
-            else {
-                value
-            };
+        if val_llty == self.type_i1() {
+            unimplemented!();
+        };
 
         let instance = Instance::mono(self.tcx, def_id);
         let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
@@ -89,11 +92,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
 
         // As an optimization, all shared statics which do not have interior
         // mutability are placed into read-only memory.
-        if !is_mutable {
-            if self.type_is_freeze(ty) {
-                #[cfg(feature = "master")]
-                global.global_set_readonly();
-            }
+        if !is_mutable && self.type_is_freeze(ty) {
+            #[cfg(feature = "master")]
+            global.global_set_readonly();
         }
 
         if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
@@ -149,7 +150,9 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
             // TODO(antoyo): set link section.
         }
 
-        if attrs.flags.contains(CodegenFnAttrFlags::USED) || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {
+        if attrs.flags.contains(CodegenFnAttrFlags::USED)
+            || attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)
+        {
             self.add_used_global(global.to_rvalue());
         }
     }
@@ -166,29 +169,33 @@ impl<'gcc, 'tcx> StaticMethods for CodegenCx<'gcc, 'tcx> {
 }
 
 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
-    #[cfg_attr(not(feature="master"), allow(unused_variables))]
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))]
     pub fn add_used_function(&self, function: Function<'gcc>) {
         #[cfg(feature = "master")]
         function.add_attribute(FnAttribute::Used);
     }
 
-    pub fn static_addr_of_mut(&self, cv: RValue<'gcc>, align: Align, kind: Option<&str>) -> RValue<'gcc> {
-        let global =
-            match kind {
-                Some(kind) if !self.tcx.sess.fewer_names() => {
-                    let name = self.generate_local_symbol_name(kind);
-                    // TODO(antoyo): check if it's okay that no link_section is set.
-
-                    let typ = self.val_ty(cv).get_aligned(align.bytes());
-                    let global = self.declare_private_global(&name[..], typ);
-                    global
-                }
-                _ => {
-                    let typ = self.val_ty(cv).get_aligned(align.bytes());
-                    let global = self.declare_unnamed_global(typ);
-                    global
-                },
-            };
+    pub fn static_addr_of_mut(
+        &self,
+        cv: RValue<'gcc>,
+        align: Align,
+        kind: Option<&str>,
+    ) -> RValue<'gcc> {
+        let global = match kind {
+            Some(kind) if !self.tcx.sess.fewer_names() => {
+                let name = self.generate_local_symbol_name(kind);
+                // TODO(antoyo): check if it's okay that no link_section is set.
+
+                let typ = self.val_ty(cv).get_aligned(align.bytes());
+                let global = self.declare_private_global(&name[..], typ);
+                global
+            }
+            _ => {
+                let typ = self.val_ty(cv).get_aligned(align.bytes());
+                let global = self.declare_unnamed_global(typ);
+                global
+            }
+        };
         global.global_set_initializer_rvalue(cv);
         // TODO(antoyo): set unnamed address.
         let rvalue = global.get_address(None);
@@ -215,8 +222,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
         let sym = self.tcx.symbol_name(instance).name;
 
-        let global =
-            if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
+        let global = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
             let llty = self.layout_of(ty).gcc_type(self);
             if let Some(global) = self.get_declared_value(sym) {
                 if self.val_ty(global) != self.type_ptr_to(llty) {
@@ -235,7 +241,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 
             if !self.tcx.is_reachable_non_generic(def_id) {
                 #[cfg(feature = "master")]
-                global.add_attribute(VarAttribute::Visibility(Visibility::Hidden));
+                global.add_string_attribute(VarAttribute::Visibility(Visibility::Hidden));
             }
 
             global
@@ -278,7 +284,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     }
 }
 
-pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAllocation<'tcx>) -> RValue<'gcc> {
+pub fn const_alloc_to_gcc<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    alloc: ConstAllocation<'tcx>,
+) -> RValue<'gcc> {
     let alloc = alloc.inner();
     let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
@@ -300,14 +309,14 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
             let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
             llvals.push(cx.const_bytes(bytes));
         }
-        let ptr_offset =
-            read_target_uint( dl.endian,
-                // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
-                // affect interpreter execution (we inspect the result after interpreter execution),
-                // and we properly interpret the provenance as a relocation pointer offset.
-                alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
-            )
-            .expect("const_alloc_to_llvm: could not read relocation pointer")
+        let ptr_offset = read_target_uint(
+            dl.endian,
+            // This `inspect` is okay since it is within the bounds of the allocation, it doesn't
+            // affect interpreter execution (we inspect the result after interpreter execution),
+            // and we properly interpret the provenance as a relocation pointer offset.
+            alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
+        )
+        .expect("const_alloc_to_llvm: could not read relocation pointer")
             as u64;
 
         let address_space = cx.tcx.global_alloc(alloc_id).address_space(cx);
@@ -317,7 +326,10 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
                 interpret::Pointer::new(prov, Size::from_bytes(ptr_offset)),
                 &cx.tcx,
             ),
-            abi::Scalar::Initialized { value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size) },
+            abi::Scalar::Initialized {
+                value: Primitive::Pointer(address_space),
+                valid_range: WrappingRange::full(dl.pointer_size),
+            },
             cx.type_i8p_ext(address_space),
         ));
         next_offset = offset + pointer_size;
@@ -337,17 +349,29 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
     cx.const_struct(&llvals, true)
 }
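
The relocation loop in `const_alloc_to_gcc` interleaves runs of plain bytes with pointer constants, using `read_target_uint` to decode each pointer-sized integer from the allocation's raw bytes. A little-endian-only sketch of that decode step (rustc's helper also handles big-endian targets and other pointer widths):

```rust
// Decode a pointer-sized unsigned integer from raw allocation bytes;
// little-endian and at most 8 bytes, for brevity.
fn read_le_uint(bytes: &[u8]) -> u64 {
    let mut buf = [0u8; 8];
    buf[..bytes.len()].copy_from_slice(bytes);
    u64::from_le_bytes(buf)
}

fn main() {
    // A relocation slot whose target offset is 0x0102.
    let slot = [0x02u8, 0x01, 0, 0, 0, 0, 0, 0];
    assert_eq!(read_le_uint(&slot), 0x0102);
}
```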
 
-pub fn codegen_static_initializer<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, def_id: DefId) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
+pub fn codegen_static_initializer<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    def_id: DefId,
+) -> Result<(RValue<'gcc>, ConstAllocation<'tcx>), ErrorHandled> {
     let alloc = cx.tcx.eval_static_initializer(def_id)?;
     Ok((const_alloc_to_gcc(cx, alloc), alloc))
 }
 
-fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: &str) -> LValue<'gcc> {
+fn check_and_apply_linkage<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    attrs: &CodegenFnAttrs,
+    ty: Ty<'tcx>,
+    sym: &str,
+) -> LValue<'gcc> {
     let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
     let gcc_type = cx.layout_of(ty).gcc_type(cx);
     if let Some(linkage) = attrs.import_linkage {
         // Declare a symbol `foo` with the desired linkage.
-        let global1 = cx.declare_global_with_linkage(&sym, cx.type_i8(), base::global_linkage_to_gcc(linkage));
+        let global1 = cx.declare_global_with_linkage(
+            &sym,
+            cx.type_i8(),
+            base::global_linkage_to_gcc(linkage),
+        );
 
         // Declare an internal global `extern_with_linkage_foo` which
         // is initialized with the address of `foo`.  If `foo` is
@@ -363,8 +387,7 @@ fn check_and_apply_linkage<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, attrs: &Codeg
         global2.global_set_initializer_rvalue(value);
         // TODO(antoyo): use global_set_initializer() when it will work.
         global2
-    }
-    else {
+    } else {
         // Generate an external declaration.
         // FIXME(nagisa): investigate whether it can be changed into define_global
 
diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs
index 5760d96165d..bc3d62f2679 100644
--- a/compiler/rustc_codegen_gcc/src/context.rs
+++ b/compiler/rustc_codegen_gcc/src/context.rs
@@ -1,22 +1,25 @@
 use std::cell::{Cell, RefCell};
 
-use gccjit::{Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, RValue, Type};
-use rustc_codegen_ssa::base::wants_msvc_seh;
-use rustc_codegen_ssa::traits::{
-    BackendTypes,
-    BaseTypeMethods,
-    MiscMethods,
+use gccjit::{
+    Block, CType, Context, Function, FunctionPtrType, FunctionType, LValue, Location, RValue, Type,
 };
+use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::errors as ssa_errors;
+use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, MiscMethods};
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_middle::span_bug;
 use rustc_middle::mir::mono::CodegenUnit;
+use rustc_middle::span_bug;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError,
+    LayoutOfHelpers, TyAndLayout,
+};
 use rustc_middle::ty::{self, Instance, ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt};
-use rustc_middle::ty::layout::{FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, TyAndLayout, LayoutOfHelpers};
 use rustc_session::Session;
-use rustc_span::{Span, source_map::respan};
-use rustc_target::abi::{call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx};
+use rustc_span::{source_map::respan, Span};
+use rustc_target::abi::{
+    call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx,
+};
 use rustc_target::spec::{HasTargetSpec, Target, TlsModel};
 
 use crate::callee::get_fn;
@@ -81,7 +84,8 @@ pub struct CodegenCx<'gcc, 'tcx> {
     /// Cache function instances of monomorphic and polymorphic items
     pub function_instances: RefCell<FxHashMap<Instance<'tcx>, Function<'gcc>>>,
     /// Cache generated vtables
-    pub vtables: RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
+    pub vtables:
+        RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>>,
 
     // TODO(antoyo): improve the SSA API to not require those.
     /// Mapping from function pointer type to indexes of on stack parameters.
@@ -121,24 +125,28 @@ pub struct CodegenCx<'gcc, 'tcx> {
 }
 
 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
-    pub fn new(context: &'gcc Context<'gcc>, codegen_unit: &'tcx CodegenUnit<'tcx>, tcx: TyCtxt<'tcx>, supports_128bit_integers: bool) -> Self {
+    pub fn new(
+        context: &'gcc Context<'gcc>,
+        codegen_unit: &'tcx CodegenUnit<'tcx>,
+        tcx: TyCtxt<'tcx>,
+        supports_128bit_integers: bool,
+    ) -> Self {
         let check_overflow = tcx.sess.overflow_checks();
 
         let create_type = |ctype, rust_type| {
             let layout = tcx.layout_of(ParamEnv::reveal_all().and(rust_type)).unwrap();
             let align = layout.align.abi.bytes();
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             {
                 context.new_c_type(ctype).get_aligned(align)
             }
-            #[cfg(not(feature="master"))]
+            #[cfg(not(feature = "master"))]
             {
                 // Since libgccjit 12 doesn't contain the fix to compare aligned integer types,
                 // only align u128 and i128.
                 if layout.ty.int_size_and_signed(tcx).0.bytes() == 16 {
                     context.new_c_type(ctype).get_aligned(align)
-                }
-                else {
+                } else {
                     context.new_c_type(ctype)
                 }
             }
@@ -153,24 +161,22 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         let u32_type = create_type(CType::UInt32t, tcx.types.u32);
         let u64_type = create_type(CType::UInt64t, tcx.types.u64);
 
-        let (i128_type, u128_type) =
-            if supports_128bit_integers {
-                let i128_type = create_type(CType::Int128t, tcx.types.i128);
-                let u128_type = create_type(CType::UInt128t, tcx.types.u128);
-                (i128_type, u128_type)
-            }
-            else {
-                /*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap();
-                let i128_align = layout.align.abi.bytes();
-                let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
-                let u128_align = layout.align.abi.bytes();*/
-
-                // TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
-                // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
-                let i128_type = context.new_array_type(None, i64_type, 2)/*.get_aligned(i128_align)*/;
-                let u128_type = context.new_array_type(None, u64_type, 2)/*.get_aligned(u128_align)*/;
-                (i128_type, u128_type)
-            };
+        let (i128_type, u128_type) = if supports_128bit_integers {
+            let i128_type = create_type(CType::Int128t, tcx.types.i128);
+            let u128_type = create_type(CType::UInt128t, tcx.types.u128);
+            (i128_type, u128_type)
+        } else {
+            /*let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.i128)).unwrap();
+            let i128_align = layout.align.abi.bytes();
+            let layout = tcx.layout_of(ParamEnv::reveal_all().and(tcx.types.u128)).unwrap();
+            let u128_align = layout.align.abi.bytes();*/
+
+            // TODO(antoyo): re-enable the alignment when libgccjit fixed the issue in
+            // gcc_jit_context_new_array_constructor (it should not use reinterpret_cast).
+            let i128_type = context.new_array_type(None, i64_type, 2)/*.get_aligned(i128_align)*/;
+            let u128_type = context.new_array_type(None, u64_type, 2)/*.get_aligned(u128_align)*/;
+            (i128_type, u128_type)
+        };
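
When `supports_128bit_integers` is false, the fallback branch above models `i128`/`u128` as a two-element array of 64-bit halves. A sketch of the split/join that representation implies (the low-half-first ordering is our assumption for illustration; the hunk itself only changes formatting):

```rust
// Split a u128 into two 64-bit halves and recombine them, matching an
// assumed low-half-first layout of the fallback array type.
fn split_u128(x: u128) -> [u64; 2] {
    [x as u64, (x >> 64) as u64]
}

fn join_u128(halves: [u64; 2]) -> u128 {
    (halves[0] as u128) | ((halves[1] as u128) << 64)
}

fn main() {
    let x = 0x0123_4567_89ab_cdef_fedc_ba98_7654_3210u128;
    assert_eq!(join_u128(split_u128(x)), x);
}
```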
 
         let tls_model = to_gcc_tls_mode(tcx.sess.tls_model());
 
@@ -196,16 +202,65 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 
         let mut functions = FxHashMap::default();
         let builtins = [
-            "__builtin_unreachable", "abort", "__builtin_expect", /*"__builtin_expect_with_probability",*/
-            "__builtin_constant_p", "__builtin_add_overflow", "__builtin_mul_overflow", "__builtin_saddll_overflow",
-            /*"__builtin_sadd_overflow",*/ "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
-            "__builtin_ssubll_overflow", /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow", "__builtin_uaddll_overflow",
-            "__builtin_uadd_overflow", "__builtin_umulll_overflow", "__builtin_umul_overflow", "__builtin_usubll_overflow",
-            "__builtin_usub_overflow", "sqrtf", "sqrt", "__builtin_powif", "__builtin_powi", "sinf", "sin", "cosf", "cos",
-            "powf", "pow", "expf", "exp", "exp2f", "exp2", "logf", "log", "log10f", "log10", "log2f", "log2", "fmaf",
-            "fma", "fabsf", "fabs", "fminf", "fmin", "fmaxf", "fmax", "copysignf", "copysign", "floorf", "floor", "ceilf",
-            "ceil", "truncf", "trunc", "rintf", "rint", "nearbyintf", "nearbyint", "roundf", "round",
-           
+            "__builtin_unreachable",
+            "abort",
+            "__builtin_expect", /*"__builtin_expect_with_probability",*/
+            "__builtin_constant_p",
+            "__builtin_add_overflow",
+            "__builtin_mul_overflow",
+            "__builtin_saddll_overflow",
+            /*"__builtin_sadd_overflow",*/
+            "__builtin_smulll_overflow", /*"__builtin_smul_overflow",*/
+            "__builtin_ssubll_overflow",
+            /*"__builtin_ssub_overflow",*/ "__builtin_sub_overflow",
+            "__builtin_uaddll_overflow",
+            "__builtin_uadd_overflow",
+            "__builtin_umulll_overflow",
+            "__builtin_umul_overflow",
+            "__builtin_usubll_overflow",
+            "__builtin_usub_overflow",
+            "sqrtf",
+            "sqrt",
+            "__builtin_powif",
+            "__builtin_powi",
+            "sinf",
+            "sin",
+            "cosf",
+            "cos",
+            "powf",
+            "pow",
+            "expf",
+            "exp",
+            "exp2f",
+            "exp2",
+            "logf",
+            "log",
+            "log10f",
+            "log10",
+            "log2f",
+            "log2",
+            "fmaf",
+            "fma",
+            "fabsf",
+            "fabs",
+            "fminf",
+            "fmin",
+            "fmaxf",
+            "fmax",
+            "copysignf",
+            "copysign",
+            "floorf",
+            "floor",
+            "ceilf",
+            "ceil",
+            "truncf",
+            "trunc",
+            "rintf",
+            "rint",
+            "nearbyintf",
+            "nearbyint",
+            "roundf",
+            "round",
         ];
 
         for builtin in builtins.iter() {
@@ -282,8 +337,12 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 
     pub fn rvalue_as_function(&self, value: RValue<'gcc>) -> Function<'gcc> {
         let function: Function<'gcc> = unsafe { std::mem::transmute(value) };
-        debug_assert!(self.functions.borrow().values().any(|value| *value == function),
-            "{:?} ({:?}) is not a function", value, value.get_type());
+        debug_assert!(
+            self.functions.borrow().values().any(|value| *value == function),
+            "{:?} ({:?}) is not a function",
+            value,
+            value.get_type()
+        );
         function
     }
 
@@ -305,13 +364,13 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             }
         }
 
-        self.supports_128bit_integers &&
-            (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+        self.supports_128bit_integers
+            && (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
     }
 
     pub fn is_non_native_int_type(&self, typ: Type<'gcc>) -> bool {
-        !self.supports_128bit_integers &&
-            (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
+        !self.supports_128bit_integers
+            && (self.u128_type.is_compatible_with(typ) || self.i128_type.is_compatible_with(typ))
     }
 
     pub fn is_native_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
@@ -319,18 +378,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     }
 
     pub fn is_int_type_or_bool(&self, typ: Type<'gcc>) -> bool {
-        self.is_native_int_type(typ) || self.is_non_native_int_type(typ) || typ.is_compatible_with(self.bool_type)
+        self.is_native_int_type(typ)
+            || self.is_non_native_int_type(typ)
+            || typ.is_compatible_with(self.bool_type)
     }
 
     pub fn sess(&self) -> &'tcx Session {
         &self.tcx.sess
     }
 
-    pub fn bitcast_if_needed(&self, value: RValue<'gcc>, expected_type: Type<'gcc>) -> RValue<'gcc> {
+    pub fn bitcast_if_needed(
+        &self,
+        value: RValue<'gcc>,
+        expected_type: Type<'gcc>,
+    ) -> RValue<'gcc> {
         if value.get_type() != expected_type {
             self.context.new_bitcast(None, value, expected_type)
-        }
-        else {
+        } else {
             value
         }
     }
@@ -345,12 +409,14 @@ impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> {
     type Funclet = (); // TODO(antoyo)
 
     type DIScope = (); // TODO(antoyo)
-    type DILocation = (); // TODO(antoyo)
+    type DILocation = Location<'gcc>;
     type DIVariable = (); // TODO(antoyo)
 }
 
 impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
-    fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
+    fn vtables(
+        &self,
+    ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<PolyExistentialTraitRef<'tcx>>), RValue<'gcc>>> {
         &self.vtables
     }
 
@@ -364,13 +430,11 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn get_fn_addr(&self, instance: Instance<'tcx>) -> RValue<'gcc> {
         let func_name = self.tcx.symbol_name(instance).name;
 
-        let func =
-            if self.intrinsics.borrow().contains_key(func_name) {
-                self.intrinsics.borrow()[func_name].clone()
-            }
-            else {
-                get_fn(self, instance)
-            };
+        let func = if self.intrinsics.borrow().contains_key(func_name) {
+            self.intrinsics.borrow()[func_name].clone()
+        } else {
+            get_fn(self, instance)
+        };
         let ptr = func.get_address(None);
 
         // TODO(antoyo): don't do this twice: i.e. in declare_fn and here.
@@ -407,37 +471,34 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             return llpersonality;
         }
         let tcx = self.tcx;
-        let func =
-            match tcx.lang_items().eh_personality() {
-                Some(def_id) if !wants_msvc_seh(self.sess()) => {
-                    let instance =
-                        ty::Instance::resolve(
-                            tcx,
-                            ty::ParamEnv::reveal_all(),
-                            def_id,
-                            ty::List::empty(),
-                        )
-                        .unwrap().unwrap();
-
-                    let symbol_name = tcx.symbol_name(instance).name;
-                    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
-                    self.linkage.set(FunctionType::Extern);
-                    let func = self.declare_fn(symbol_name, &fn_abi);
-                    let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
-                    func
-                },
-                _ => {
-                    let name =
-                        if wants_msvc_seh(self.sess()) {
-                            "__CxxFrameHandler3"
-                        }
-                        else {
-                            "rust_eh_personality"
-                        };
-                    let func = self.declare_func(name, self.type_i32(), &[], true);
-                    unsafe { std::mem::transmute(func) }
-                }
-            };
+        let func = match tcx.lang_items().eh_personality() {
+            Some(def_id) if !wants_msvc_seh(self.sess()) => {
+                let instance = ty::Instance::resolve(
+                    tcx,
+                    ty::ParamEnv::reveal_all(),
+                    def_id,
+                    ty::List::empty(),
+                )
+                .unwrap()
+                .unwrap();
+
+                let symbol_name = tcx.symbol_name(instance).name;
+                let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
+                self.linkage.set(FunctionType::Extern);
+                let func = self.declare_fn(symbol_name, &fn_abi);
+                let func: RValue<'gcc> = unsafe { std::mem::transmute(func) };
+                func
+            }
+            _ => {
+                let name = if wants_msvc_seh(self.sess()) {
+                    "__CxxFrameHandler3"
+                } else {
+                    "rust_eh_personality"
+                };
+                let func = self.declare_func(name, self.type_i32(), &[], true);
+                unsafe { std::mem::transmute(func) }
+            }
+        };
         // TODO(antoyo): apply target cpu attributes.
         self.eh_personality.set(Some(func));
         func
@@ -467,8 +528,7 @@ impl<'gcc, 'tcx> MiscMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         let entry_name = self.sess().target.entry_name.as_ref();
         if self.get_declared_value(entry_name).is_none() {
             Some(self.declare_entry_fn(entry_name, fn_type, ()))
-        }
-        else {
+        } else {
             // If the symbol already exists, it is an error: for example, the user wrote
             // #[no_mangle] extern "C" fn main(..) {..}
             // instead of #[start]
diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs
index d1bfd833cd8..aed15769025 100644
--- a/compiler/rustc_codegen_gcc/src/debuginfo.rs
+++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs
@@ -1,9 +1,14 @@
-use gccjit::RValue;
-use rustc_codegen_ssa::mir::debuginfo::{FunctionDebugContext, VariableKind};
+use crate::rustc_index::Idx;
+use gccjit::{Location, RValue};
+use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
 use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoMethods};
-use rustc_middle::mir;
+use rustc_data_structures::sync::Lrc;
+use rustc_index::bit_set::BitSet;
+use rustc_index::IndexVec;
+use rustc_middle::mir::{self, Body, SourceScope};
 use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty};
-use rustc_span::{SourceFile, Span, Symbol};
+use rustc_session::config::DebugInfo;
+use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span, Symbol};
 use rustc_target::abi::call::FnAbi;
 use rustc_target::abi::Size;
 use std::ops::Range;
@@ -11,31 +16,183 @@ use std::ops::Range;
 use crate::builder::Builder;
 use crate::context::CodegenCx;
 
+pub(super) const UNKNOWN_LINE_NUMBER: u32 = 0;
+pub(super) const UNKNOWN_COLUMN_NUMBER: u32 = 0;
+
 impl<'a, 'gcc, 'tcx> DebugInfoBuilderMethods for Builder<'a, 'gcc, 'tcx> {
     // FIXME(eddyb) find a common convention for all of the debuginfo-related
     // names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
     fn dbg_var_addr(
         &mut self,
         _dbg_var: Self::DIVariable,
-        _scope_metadata: Self::DIScope,
+        _dbg_loc: Self::DILocation,
         _variable_alloca: Self::Value,
         _direct_offset: Size,
         _indirect_offsets: &[Size],
         _fragment: Option<Range<Size>>,
     ) {
-        unimplemented!();
+        // FIXME(tempdragon): Not sure if this is correct; it is probably wrong, but keep it here for now.
+        #[cfg(feature = "master")]
+        _variable_alloca.set_location(_dbg_loc);
     }
 
     fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
         // TODO(antoyo): insert reference to gdb debug scripts section global.
     }
 
-    fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {
-        unimplemented!();
+    /// FIXME(tempdragon): This function is not yet implemented. It seems that both the
+    /// debug name and the mangled name should be included in the LValues.
+    /// In addition, a function to get the rvalue type (`m_is_lvalue`) should also be included.
+    fn set_var_name(&mut self, _value: RValue<'gcc>, _name: &str) {}
+
+    fn set_dbg_loc(&mut self, dbg_loc: Self::DILocation) {
+        self.location = Some(dbg_loc);
+    }
+}
+
+/// Generate the `debug_context` for a MIR `Body`.
+/// # Source of Origin
+/// Copied from `create_scope_map.rs` of rustc_codegen_llvm.
+fn compute_mir_scopes<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
+) {
+    // Find all scopes with variables defined in them.
+    let variables = if cx.sess().opts.debuginfo == DebugInfo::Full {
+        let mut vars = BitSet::new_empty(mir.source_scopes.len());
+        // FIXME(eddyb) take into account that arguments always have debuginfo,
+        // irrespective of their name (assuming full debuginfo is enabled).
+        // NOTE(eddyb) actually, on second thought, those are always in the
+        // function scope, which always exists.
+        for var_debug_info in &mir.var_debug_info {
+            vars.insert(var_debug_info.source_info.scope);
+        }
+        Some(vars)
+    } else {
+        // Nothing to emit, of course.
+        None
+    };
+    let mut instantiated = BitSet::new_empty(mir.source_scopes.len());
+    // Instantiate all scopes.
+    for idx in 0..mir.source_scopes.len() {
+        let scope = SourceScope::new(idx);
+        make_mir_scope(cx, instance, mir, &variables, debug_context, &mut instantiated, scope);
+    }
+    assert!(instantiated.count() == mir.source_scopes.len());
+}
+
+/// Update the `debug_context`, adding a new scope to it
+/// if it has not already been added, as recorded in `instantiated`.
+///
+/// # Source of Origin
+/// Copied from `create_scope_map.rs` of rustc_codegen_llvm.
+/// FIXME(tempdragon/?): Add scope support here.
+fn make_mir_scope<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    instance: Instance<'tcx>,
+    mir: &Body<'tcx>,
+    variables: &Option<BitSet<SourceScope>>,
+    debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>,
+    instantiated: &mut BitSet<SourceScope>,
+    scope: SourceScope,
+) {
+    if instantiated.contains(scope) {
+        return;
+    }
+
+    let scope_data = &mir.source_scopes[scope];
+    let parent_scope = if let Some(parent) = scope_data.parent_scope {
+        make_mir_scope(cx, instance, mir, variables, debug_context, instantiated, parent);
+        debug_context.scopes[parent]
+    } else {
+        // The root is the function itself.
+        let file = cx.sess().source_map().lookup_source_file(mir.span.lo());
+        debug_context.scopes[scope] = DebugScope {
+            file_start_pos: file.start_pos,
+            file_end_pos: file.end_position(),
+            ..debug_context.scopes[scope]
+        };
+        instantiated.insert(scope);
+        return;
+    };
+
+    if let Some(vars) = variables {
+        if !vars.contains(scope) && scope_data.inlined.is_none() {
+            // Do not create a DIScope if there are no variables defined in this
+            // MIR `SourceScope`, and it's not `inlined`, to avoid debuginfo bloat.
+            debug_context.scopes[scope] = parent_scope;
+            instantiated.insert(scope);
+            return;
+        }
     }
 
-    fn set_dbg_loc(&mut self, _dbg_loc: Self::DILocation) {
-        unimplemented!();
+    let loc = cx.lookup_debug_loc(scope_data.span.lo());
+
+    // FIXME(tempdragon): Add the scope related code here if the scope is supported.
+    let dbg_scope = ();
+
+    let inlined_at = scope_data.inlined.map(|(_, callsite_span)| {
+        // FIXME(eddyb) this doesn't account for the macro-related
+        // `Span` fixups that `rustc_codegen_ssa::mir::debuginfo` does.
+        let callsite_scope = parent_scope.adjust_dbg_scope_for_span(cx, callsite_span);
+        cx.dbg_loc(callsite_scope, parent_scope.inlined_at, callsite_span)
+    });
+    // TODO(tempdragon): dbg_scope: Add support for scope extension here.
+    // Fall back to the parent scope's `inlined_at` when this scope has none.
+    let inlined_at = inlined_at.or(parent_scope.inlined_at);
+
+    debug_context.scopes[scope] = DebugScope {
+        dbg_scope,
+        inlined_at,
+        file_start_pos: loc.file.start_pos,
+        file_end_pos: loc.file.end_position(),
+    };
+    instantiated.insert(scope);
+}
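
As a rough illustration of the parent-first walk performed by `compute_mir_scopes`/`make_mir_scope`, here is a self-contained sketch that instantiates a tree of scopes given only parent links; the `parents` table is invented for the example.

// Minimal sketch of the parent-first scope instantiation used above.
// `parents[i]` is the parent of scope `i`, or `None` for the root.
fn instantiate(scope: usize, parents: &[Option<usize>], instantiated: &mut Vec<bool>) {
    if instantiated[scope] {
        return; // Already handled, like the `instantiated.contains(scope)` check above.
    }
    if let Some(parent) = parents[scope] {
        // Parents are always instantiated before their children.
        instantiate(parent, parents, instantiated);
    }
    instantiated[scope] = true;
}

fn main() {
    let parents = [None, Some(0), Some(1), Some(0)];
    let mut instantiated = vec![false; parents.len()];
    for scope in 0..parents.len() {
        instantiate(scope, &parents, &mut instantiated);
    }
    assert!(instantiated.iter().all(|&done| done));
}
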
+
+/// A source code location used to generate debug information.
+// FIXME(eddyb) rename this to better indicate it's a duplicate of
+// `rustc_span::Loc` rather than `DILocation`, perhaps by making
+// `lookup_char_pos` return the right information instead.
+pub struct DebugLoc {
+    /// Information about the original source file.
+    pub file: Lrc<SourceFile>,
+    /// The (1-based) line number.
+    pub line: u32,
+    /// The (1-based) column number.
+    pub col: u32,
+}
+
+impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    /// Looks up debug source information about a `BytePos`.
+    // FIXME(eddyb) rename this to better indicate it's a duplicate of
+    // `lookup_char_pos` rather than `dbg_loc`, perhaps by making
+    // `lookup_char_pos` return the right information instead.
+    // Source of Origin: cg_llvm
+    pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
+        let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
+            Ok(SourceFileAndLine { sf: file, line }) => {
+                let line_pos = file.lines()[line];
+
+                // Use 1-based indexing.
+                let line = (line + 1) as u32;
+                let col = (file.relative_position(pos) - line_pos).to_u32() + 1;
+
+                (file, line, col)
+            }
+            Err(file) => (file, UNKNOWN_LINE_NUMBER, UNKNOWN_COLUMN_NUMBER),
+        };
+
+        // For MSVC, omit the column number.
+        // Otherwise, emit it. This mimics clang behaviour.
+        // See discussion in https://github.com/rust-lang/rust/issues/42921
+        if self.sess().target.is_like_msvc {
+            DebugLoc { file, line, col: UNKNOWN_COLUMN_NUMBER }
+        } else {
+            DebugLoc { file, line, col }
+        }
     }
 }
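
The 1-based line/column arithmetic in `lookup_debug_loc` can be checked in isolation. A minimal sketch with plain byte offsets standing in for `BytePos`:

// Minimal model of the line/column computation in `lookup_debug_loc`: given the
// byte offset where a line starts and the byte offset of the position, the
// 1-based column is their difference plus one.
fn one_based_line_col(line_index: usize, line_start: u32, pos: u32) -> (u32, u32) {
    let line = (line_index + 1) as u32; // lines are stored 0-based
    let col = (pos - line_start) + 1; // columns are byte-based, 1-based
    (line, col)
}

fn main() {
    // Position at byte 12 on the third stored line, which starts at byte 10.
    assert_eq!(one_based_line_col(2, 10, 12), (3, 3));
}
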
 
@@ -51,13 +208,31 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
 
     fn create_function_debug_context(
         &self,
-        _instance: Instance<'tcx>,
-        _fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
-        _llfn: RValue<'gcc>,
-        _mir: &mir::Body<'tcx>,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        llfn: RValue<'gcc>,
+        mir: &mir::Body<'tcx>,
     ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>> {
-        // TODO(antoyo)
-        None
+        if self.sess().opts.debuginfo == DebugInfo::None {
+            return None;
+        }
+
+        // Initialize fn debug context (including scopes).
+        let empty_scope = DebugScope {
+            dbg_scope: self.dbg_scope_fn(instance, fn_abi, Some(llfn)),
+            inlined_at: None,
+            file_start_pos: BytePos(0),
+            file_end_pos: BytePos(0),
+        };
+        let mut fn_debug_context = FunctionDebugContext {
+            scopes: IndexVec::from_elem(empty_scope, &mir.source_scopes.as_slice()),
+            inlined_function_scopes: Default::default(),
+        };
+
+        // Fill in all the scopes, with the information from the MIR body.
+        compute_mir_scopes(self, instance, mir, &mut fn_debug_context);
+
+        Some(fn_debug_context)
     }
 
     fn extend_scope_to_file(
@@ -65,11 +240,11 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         _scope_metadata: Self::DIScope,
         _file: &SourceFile,
     ) -> Self::DIScope {
-        unimplemented!();
+        // TODO(antoyo): implement.
     }
 
     fn debuginfo_finalize(&self) {
-        // TODO(antoyo)
+        self.context.set_debug_info(true)
     }
 
     fn create_dbg_var(
@@ -80,7 +255,6 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         _variable_kind: VariableKind,
         _span: Span,
     ) -> Self::DIVariable {
-        unimplemented!();
     }
 
     fn dbg_scope_fn(
@@ -89,15 +263,40 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         _fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         _maybe_definition_llfn: Option<RValue<'gcc>>,
     ) -> Self::DIScope {
-        unimplemented!();
+        // TODO(antoyo): implement.
     }
 
     fn dbg_loc(
         &self,
         _scope: Self::DIScope,
         _inlined_at: Option<Self::DILocation>,
-        _span: Span,
+        span: Span,
     ) -> Self::DILocation {
-        unimplemented!();
+        let pos = span.lo();
+        let DebugLoc { file, line, col } = self.lookup_debug_loc(pos);
+        let loc = match &file.name {
+            rustc_span::FileName::Real(name) => match name {
+                rustc_span::RealFileName::LocalPath(name) => {
+                    if let Some(name) = name.to_str() {
+                        self.context.new_location(name, line as i32, col as i32)
+                    } else {
+                        Location::null()
+                    }
+                }
+                rustc_span::RealFileName::Remapped { local_path, virtual_name: _ } => {
+                    if let Some(name) = local_path.as_ref() {
+                        if let Some(name) = name.to_str() {
+                            self.context.new_location(name, line as i32, col as i32)
+                        } else {
+                            Location::null()
+                        }
+                    } else {
+                        Location::null()
+                    }
+                }
+            },
+            _ => Location::null(),
+        };
+        loc
     }
 }
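
The nested matches in `dbg_loc` reduce to "produce a location only when a real, local, UTF-8 path is available". A sketch of that flattening with a stand-in enum (the `gccjit` calls are omitted):

use std::path::PathBuf;

// Stand-in for the subset of `rustc_span::FileName` handled in `dbg_loc`.
enum FileName {
    Local(PathBuf),
    Remapped { local_path: Option<PathBuf> },
    Other,
}

// Returns the path to hand to `Context::new_location`, or `None` for
// the cases that fall back to `Location::null()`.
fn location_path(name: &FileName) -> Option<&str> {
    match name {
        FileName::Local(path) => path.to_str(),
        FileName::Remapped { local_path } => local_path.as_ref()?.to_str(),
        FileName::Other => None,
    }
}

fn main() {
    let name = FileName::Local(PathBuf::from("src/lib.rs"));
    assert_eq!(location_path(&name), Some("src/lib.rs"));
}
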
diff --git a/compiler/rustc_codegen_gcc/src/declare.rs b/compiler/rustc_codegen_gcc/src/declare.rs
index 247454fa58e..db6edbab12d 100644
--- a/compiler/rustc_codegen_gcc/src/declare.rs
+++ b/compiler/rustc_codegen_gcc/src/declare.rs
@@ -1,6 +1,6 @@
-use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::{FnAttribute, ToRValue};
+use gccjit::{Function, FunctionType, GlobalKind, LValue, RValue, Type};
 use rustc_codegen_ssa::traits::BaseTypeMethods;
 use rustc_middle::ty::Ty;
 use rustc_span::Symbol;
@@ -11,7 +11,13 @@ use crate::context::CodegenCx;
 use crate::intrinsic::llvm;
 
 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
-    pub fn get_or_insert_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+    pub fn get_or_insert_global(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        is_tls: bool,
+        link_section: Option<Symbol>,
+    ) -> LValue<'gcc> {
         if self.globals.borrow().contains_key(name) {
             let typ = self.globals.borrow()[name].get_type();
             let global = self.context.new_global(None, GlobalKind::Imported, typ, name);
@@ -22,8 +28,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
                 global.set_link_section(link_section.as_str());
             }
             global
-        }
-        else {
+        } else {
             self.declare_global(name, ty, GlobalKind::Exported, is_tls, link_section)
         }
     }
@@ -33,19 +38,37 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         self.context.new_global(None, GlobalKind::Internal, ty, &name)
     }
 
-    pub fn declare_global_with_linkage(&self, name: &str, ty: Type<'gcc>, linkage: GlobalKind) -> LValue<'gcc> {
+    pub fn declare_global_with_linkage(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        linkage: GlobalKind,
+    ) -> LValue<'gcc> {
         let global = self.context.new_global(None, linkage, ty, name);
         let global_address = global.get_address(None);
         self.globals.borrow_mut().insert(name.to_string(), global_address);
         global
     }
 
-    pub fn declare_func(&self, name: &str, return_type: Type<'gcc>, params: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
+    pub fn declare_func(
+        &self,
+        name: &str,
+        return_type: Type<'gcc>,
+        params: &[Type<'gcc>],
+        variadic: bool,
+    ) -> Function<'gcc> {
         self.linkage.set(FunctionType::Extern);
         declare_raw_fn(self, name, () /*llvm::CCallConv*/, return_type, params, variadic)
     }
 
-    pub fn declare_global(&self, name: &str, ty: Type<'gcc>, global_kind: GlobalKind, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+    pub fn declare_global(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        global_kind: GlobalKind,
+        is_tls: bool,
+        link_section: Option<Symbol>,
+    ) -> LValue<'gcc> {
         let global = self.context.new_global(None, global_kind, ty, name);
         if is_tls {
             global.set_tls_model(self.tls_model);
@@ -65,13 +88,25 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         global
     }
 
-    pub fn declare_entry_fn(&self, name: &str, _fn_type: Type<'gcc>, callconv: () /*llvm::CCallConv*/) -> RValue<'gcc> {
+    pub fn declare_entry_fn(
+        &self,
+        name: &str,
+        _fn_type: Type<'gcc>,
+        callconv: (), /*llvm::CCallConv*/
+    ) -> RValue<'gcc> {
         // TODO(antoyo): use the fn_type parameter.
         let const_string = self.context.new_type::<u8>().make_pointer().make_pointer();
         let return_type = self.type_i32();
         let variadic = false;
         self.linkage.set(FunctionType::Exported);
-        let func = declare_raw_fn(self, name, callconv, return_type, &[self.type_i32(), const_string], variadic);
+        let func = declare_raw_fn(
+            self,
+            name,
+            callconv,
+            return_type,
+            &[self.type_i32(), const_string],
+            variadic,
+        );
         // NOTE: the current_func also needs to be set here, because get_fn() is not called
         // for the main function.
         *self.current_func.borrow_mut() = Some(func);
@@ -85,19 +120,32 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             arguments_type,
             is_c_variadic,
             on_stack_param_indices,
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             fn_attributes,
         } = fn_abi.gcc_type(self);
-        let func = declare_raw_fn(self, name, () /*fn_abi.llvm_cconv()*/, return_type, &arguments_type, is_c_variadic);
+        let func = declare_raw_fn(
+            self,
+            name,
+            (), /*fn_abi.llvm_cconv()*/
+            return_type,
+            &arguments_type,
+            is_c_variadic,
+        );
         self.on_stack_function_params.borrow_mut().insert(func, on_stack_param_indices);
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         for fn_attr in fn_attributes {
             func.add_attribute(fn_attr);
         }
         func
     }
 
-    pub fn define_global(&self, name: &str, ty: Type<'gcc>, is_tls: bool, link_section: Option<Symbol>) -> LValue<'gcc> {
+    pub fn define_global(
+        &self,
+        name: &str,
+        ty: Type<'gcc>,
+        is_tls: bool,
+        link_section: Option<Symbol>,
+    ) -> LValue<'gcc> {
         self.get_or_insert_global(name, ty, is_tls, link_section)
     }
 
@@ -111,62 +159,84 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
 ///
 /// If there’s a value with the same name already declared, the function will
 /// update the declaration and return the existing Value instead.
-fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*llvm::CallConv*/, return_type: Type<'gcc>, param_types: &[Type<'gcc>], variadic: bool) -> Function<'gcc> {
+fn declare_raw_fn<'gcc>(
+    cx: &CodegenCx<'gcc, '_>,
+    name: &str,
+    _callconv: (), /*llvm::CallConv*/
+    return_type: Type<'gcc>,
+    param_types: &[Type<'gcc>],
+    variadic: bool,
+) -> Function<'gcc> {
     if name.starts_with("llvm.") {
         let intrinsic = llvm::intrinsic(name, cx);
         cx.intrinsics.borrow_mut().insert(name.to_string(), intrinsic);
         return intrinsic;
     }
-    let func =
-        if cx.functions.borrow().contains_key(name) {
-            cx.functions.borrow()[name]
-        }
-        else {
-            let params: Vec<_> = param_types.into_iter().enumerate()
-                .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
+    let func = if cx.functions.borrow().contains_key(name) {
+        cx.functions.borrow()[name]
+    } else {
+        let params: Vec<_> = param_types
+            .into_iter()
+            .enumerate()
+            .map(|(index, param)| {
+                cx.context.new_parameter(None, *param, &format!("param{}", index))
+            }) // TODO(antoyo): set name.
+            .collect();
+        #[cfg(not(feature = "master"))]
+        let name = mangle_name(name);
+        let func =
+            cx.context.new_function(None, cx.linkage.get(), return_type, &params, &name, variadic);
+        cx.functions.borrow_mut().insert(name.to_string(), func);
+
+        #[cfg(feature = "master")]
+        if name == "rust_eh_personality" {
+            // NOTE: GCC will sometimes change the personality function set on a function from
+            // rust_eh_personality to __gcc_personality_v0 as an optimization.
+            // As such, we need to create a weak alias from __gcc_personality_v0 to
+            // rust_eh_personality in order to avoid a linker error.
+            // This needs to be weak in order to still allow using the standard
+            // __gcc_personality_v0 when linking to it.
+            // Since aliases don't work (maybe because of a bug in LTO partitioning?), we
+            // create a wrapper function that calls rust_eh_personality.
+
+            let params: Vec<_> = param_types
+                .into_iter()
+                .enumerate()
+                .map(|(index, param)| {
+                    cx.context.new_parameter(None, *param, &format!("param{}", index))
+                }) // TODO(antoyo): set name.
                 .collect();
-            let func = cx.context.new_function(None, cx.linkage.get(), return_type, &params, mangle_name(name), variadic);
-            cx.functions.borrow_mut().insert(name.to_string(), func);
-
-            #[cfg(feature="master")]
-            if name == "rust_eh_personality" {
-                // NOTE: GCC will sometimes change the personality function set on a function from
-                // rust_eh_personality to __gcc_personality_v0 as an optimization.
-                // As such, we need to create a weak alias from __gcc_personality_v0 to
-                // rust_eh_personality in order to avoid a linker error.
-                // This needs to be weak in order to still allow using the standard
-                // __gcc_personality_v0 when the linking to it.
-                // Since aliases don't work (maybe because of a bug in LTO partitioning?), we
-                // create a wrapper function that calls rust_eh_personality.
-
-                let params: Vec<_> = param_types.into_iter().enumerate()
-                    .map(|(index, param)| cx.context.new_parameter(None, *param, &format!("param{}", index))) // TODO(antoyo): set name.
-                    .collect();
-                let gcc_func = cx.context.new_function(None, FunctionType::Exported, return_type, &params, "__gcc_personality_v0", variadic);
-
-                // We need a normal extern function for the crates that access rust_eh_personality
-                // without defining it, otherwise we'll get a compiler error.
-                //
-                // For the crate defining it, that needs to be a weak alias instead.
-                gcc_func.add_attribute(FnAttribute::Weak);
-
-                let block = gcc_func.new_block("start");
-                let mut args = vec![];
-                for param in &params {
-                    args.push(param.to_rvalue());
-                }
-                let call = cx.context.new_call(None, func, &args);
-                if return_type == cx.type_void() {
-                    block.add_eval(None, call);
-                    block.end_with_void_return(None);
-                }
-                else {
-                    block.end_with_return(None, call);
-                }
+            let gcc_func = cx.context.new_function(
+                None,
+                FunctionType::Exported,
+                return_type,
+                &params,
+                "__gcc_personality_v0",
+                variadic,
+            );
+
+            // We need a normal extern function for the crates that access rust_eh_personality
+            // without defining it, otherwise we'll get a compiler error.
+            //
+            // For the crate defining it, that needs to be a weak alias instead.
+            gcc_func.add_attribute(FnAttribute::Weak);
+
+            let block = gcc_func.new_block("start");
+            let mut args = vec![];
+            for param in &params {
+                args.push(param.to_rvalue());
             }
+            let call = cx.context.new_call(None, func, &args);
+            if return_type == cx.type_void() {
+                block.add_eval(None, call);
+                block.end_with_void_return(None);
+            } else {
+                block.end_with_return(None, call);
+            }
+        }
 
-            func
-        };
+        func
+    };
 
     // TODO(antoyo): set function calling convention.
     // TODO(antoyo): set unnamed address.
@@ -179,15 +249,24 @@ fn declare_raw_fn<'gcc>(cx: &CodegenCx<'gcc, '_>, name: &str, _callconv: () /*ll
 }
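
What the generated `__gcc_personality_v0` wrapper amounts to, expressed as Rust source rather than libgccjit calls: a weak, exported symbol that forwards to `rust_eh_personality`. This is purely illustrative; the parameter list is simplified, and the `linkage` attribute is nightly-only.

// Illustrative only: the diff builds this shape via libgccjit, not via Rust source.
#![feature(linkage)]

extern "C" {
    // Simplified signature; the real personality routine takes more arguments.
    fn rust_eh_personality(version: i32, actions: i32) -> i32;
}

// Weak, so the standard __gcc_personality_v0 still wins when something links it in.
#[linkage = "weak"]
#[no_mangle]
pub unsafe extern "C" fn __gcc_personality_v0(version: i32, actions: i32) -> i32 {
    rust_eh_personality(version, actions)
}
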
 
 // FIXME(antoyo): this is a hack because libgccjit currently only supports alpha, num and _.
-// Unsupported characters: `$` and `.`.
-pub fn mangle_name(name: &str) -> String {
-    name.replace(|char: char| {
-        if !char.is_alphanumeric() && char != '_' {
-            debug_assert!("$.*".contains(char), "Unsupported char in function name {}: {}", name, char);
-            true
-        }
-        else {
-            false
-        }
-    }, "_")
+// Unsupported characters: `$`, `.` and `*`.
+// FIXME(antoyo): `*` might not be expected: https://github.com/rust-lang/rust/issues/116979#issuecomment-1840926865
+#[cfg(not(feature = "master"))]
+fn mangle_name(name: &str) -> String {
+    name.replace(
+        |char: char| {
+            if !char.is_alphanumeric() && char != '_' {
+                debug_assert!(
+                    "$.*".contains(char),
+                    "Unsupported char in function name {}: {}",
+                    name,
+                    char
+                );
+                true
+            } else {
+                false
+            }
+        },
+        "_",
+    )
 }
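
A quick check of what `mangle_name` produces, assuming the same replacement rule (every character outside alphanumerics and `_` becomes `_`); the input names are made up:

// Same replacement rule as `mangle_name` above, minus the debug assertion.
fn mangle(name: &str) -> String {
    name.replace(|c: char| !c.is_alphanumeric() && c != '_', "_")
}

fn main() {
    assert_eq!(mangle("$name.with.dots"), "_name_with_dots");
    assert_eq!(mangle("already_fine_123"), "already_fine_123");
}
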
diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs
index 988a7e1033e..f963a153fba 100644
--- a/compiler/rustc_codegen_gcc/src/errors.rs
+++ b/compiler/rustc_codegen_gcc/src/errors.rs
@@ -1,9 +1,6 @@
-use rustc_errors::{
-    DiagCtxt, DiagArgValue, Diag, EmissionGuarantee, IntoDiagnostic, IntoDiagnosticArg, Level,
-};
+use rustc_errors::{Diag, DiagCtxt, EmissionGuarantee, IntoDiagnostic, Level};
 use rustc_macros::{Diagnostic, Subdiagnostic};
 use rustc_span::Span;
-use std::borrow::Cow;
 
 use crate::fluent_generated as fluent;
 
@@ -31,18 +28,6 @@ pub(crate) enum PossibleFeature<'a> {
     None,
 }
 
-struct ExitCode(Option<i32>);
-
-impl IntoDiagnosticArg for ExitCode {
-    fn into_diagnostic_arg(self) -> DiagArgValue {
-        let ExitCode(exit_code) = self;
-        match exit_code {
-            Some(t) => t.into_diagnostic_arg(),
-            None => DiagArgValue::Str(Cow::Borrowed("<signal>")),
-        }
-    }
-}
-
 #[derive(Diagnostic)]
 #[diag(codegen_gcc_lto_not_supported)]
 pub(crate) struct LTONotSupported;
@@ -81,12 +66,6 @@ pub(crate) struct CopyBitcode {
 pub(crate) struct DynamicLinkingWithLTO;
 
 #[derive(Diagnostic)]
-#[diag(codegen_gcc_load_bitcode)]
-pub(crate) struct LoadBitcode {
-    name: String,
-}
-
-#[derive(Diagnostic)]
 #[diag(codegen_gcc_lto_disallowed)]
 pub(crate) struct LtoDisallowed;
 
diff --git a/compiler/rustc_codegen_gcc/src/gcc_util.rs b/compiler/rustc_codegen_gcc/src/gcc_util.rs
index 4babe5bfb81..53877e8ff7f 100644
--- a/compiler/rustc_codegen_gcc/src/gcc_util.rs
+++ b/compiler/rustc_codegen_gcc/src/gcc_util.rs
@@ -1,4 +1,4 @@
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::Context;
 use smallvec::{smallvec, SmallVec};
 
@@ -7,7 +7,10 @@ use rustc_middle::bug;
 use rustc_session::Session;
 use rustc_target::target_features::RUSTC_SPECIFIC_FEATURES;
 
-use crate::errors::{PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature, UnknownCTargetFeaturePrefix};
+use crate::errors::{
+    PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature,
+    UnknownCTargetFeaturePrefix,
+};
 
 /// The list of GCC features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
 /// `--target` and similar).
@@ -44,7 +47,10 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
     // -Ctarget-features
     let supported_features = sess.target.supported_target_features();
     let mut featsmap = FxHashMap::default();
-    let feats = sess.opts.cg.target_feature
+    let feats = sess
+        .opts
+        .cg
+        .target_feature
         .split(',')
         .filter_map(|s| {
             let enable_disable = match s.chars().next() {
@@ -69,16 +75,14 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
                         None
                     }
                 });
-                let unknown_feature =
-                    if let Some(rust_feature) = rust_feature {
-                        UnknownCTargetFeature {
-                            feature,
-                            rust_feature: PossibleFeature::Some { rust_feature },
-                        }
+                let unknown_feature = if let Some(rust_feature) = rust_feature {
+                    UnknownCTargetFeature {
+                        feature,
+                        rust_feature: PossibleFeature::Some { rust_feature },
                     }
-                    else {
-                        UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
-                    };
+                } else {
+                    UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None }
+                };
                 sess.dcx().emit_warn(unknown_feature);
             }
 
@@ -95,18 +99,18 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri
             // passing requests down to GCC. This means that all in-language
             // features also work on the command line instead of having two
             // different names when the GCC name and the Rust name differ.
-            Some(to_gcc_features(sess, feature)
-                .iter()
-                .flat_map(|feat| to_gcc_features(sess, feat).into_iter())
-                .map(|feature| {
-                    if enable_disable == '-' {
-                        format!("-{}", feature)
-                    }
-                    else {
-                        feature.to_string()
-                    }
-                })
-                .collect::<Vec<_>>(),
+            Some(
+                to_gcc_features(sess, feature)
+                    .iter()
+                    .flat_map(|feat| to_gcc_features(sess, feat).into_iter())
+                    .map(|feature| {
+                        if enable_disable == '-' {
+                            format!("-{}", feature)
+                        } else {
+                            feature.to_string()
+                        }
+                    })
+                    .collect::<Vec<_>>(),
             )
         })
         .flatten();
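
The `+feat`/`-feat` handling above can be exercised on its own. A simplified sketch that keeps only the enable/disable split and the `-` re-prefixing (the GCC feature translation and the diagnostics are stubbed out):

// Minimal model of the -Ctarget-feature parsing above: split on ',',
// peel the leading '+'/'-', and re-apply '-' to disabled features.
fn parse_target_features(s: &str) -> Vec<String> {
    s.split(',')
        .filter_map(|s| {
            let enable_disable = s.chars().next()?;
            if enable_disable != '+' && enable_disable != '-' {
                return None; // the real code warns via UnknownCTargetFeaturePrefix
            }
            let feature = &s[1..];
            Some(if enable_disable == '-' { format!("-{}", feature) } else { feature.to_string() })
        })
        .collect()
}

fn main() {
    assert_eq!(parse_target_features("+sse2,-avx,bogus"), vec!["sse2", "-avx"]);
}
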
@@ -184,7 +188,10 @@ pub fn to_gcc_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2]>
 
 // Given a map from target_features to whether they are enabled or disabled,
 // ensure only valid combinations are allowed.
-pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) -> Option<&'static [&'static str]> {
+pub fn check_tied_features(
+    sess: &Session,
+    features: &FxHashMap<&str, bool>,
+) -> Option<&'static [&'static str]> {
     for tied in sess.target.tied_target_features() {
         // Tied features must be set to the same value, or not set at all
         let mut tied_iter = tied.iter();
@@ -199,7 +206,7 @@ pub fn check_tied_features(sess: &Session, features: &FxHashMap<&str, bool>) ->
 fn arch_to_gcc(name: &str) -> &str {
     match name {
         "M68020" => "68020",
-         _ => name,
+        _ => name,
     }
 }
 
@@ -208,15 +215,13 @@ fn handle_native(name: &str) -> &str {
         return arch_to_gcc(name);
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     {
         // Get the native arch.
         let context = Context::default();
-        context.get_target_info().arch().unwrap()
-            .to_str()
-            .unwrap()
+        context.get_target_info().arch().unwrap().to_str().unwrap()
     }
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     unimplemented!();
 }
 
diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs
index 9b9b3ea4f87..841bcf592e4 100644
--- a/compiler/rustc_codegen_gcc/src/int.rs
+++ b/compiler/rustc_codegen_gcc/src/int.rs
@@ -4,15 +4,22 @@
 
 use std::convert::TryFrom;
 
-use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
+use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp};
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
 use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
 use rustc_middle::ty::{ParamEnv, Ty};
-use rustc_target::abi::{Endian, call::{ArgAbi, ArgAttributes, Conv, FnAbi, PassMode}};
+use rustc_target::abi::{
+    call::{ArgAbi, ArgAttributes, Conv, FnAbi, PassMode},
+    Endian,
+};
 use rustc_target::spec;
 
 use crate::builder::ToGccComp;
-use crate::{builder::Builder, common::{SignType, TypeReflection}, context::CodegenCx};
+use crate::{
+    builder::Builder,
+    common::{SignType, TypeReflection},
+    context::CodegenCx,
+};
 
 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     pub fn gcc_urem(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -29,35 +36,39 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let typ = a.get_type();
         if self.is_native_int_type_or_bool(typ) {
             let operation =
-                if typ.is_bool() {
-                    UnaryOp::LogicalNegate
-                }
-                else {
-                    UnaryOp::BitwiseNegate
-                };
-            self.cx.context.new_unary_op(None, operation, typ, a)
-        }
-        else {
+                if typ.is_bool() { UnaryOp::LogicalNegate } else { UnaryOp::BitwiseNegate };
+            self.cx.context.new_unary_op(self.location, operation, typ, a)
+        } else {
             let element_type = typ.dyncast_array().expect("element type");
-            self.from_low_high_rvalues(typ,
-                self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
-                self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
+            self.from_low_high_rvalues(
+                typ,
+                self.cx.context.new_unary_op(
+                    self.location,
+                    UnaryOp::BitwiseNegate,
+                    element_type,
+                    self.low(a),
+                ),
+                self.cx.context.new_unary_op(
+                    self.location,
+                    UnaryOp::BitwiseNegate,
+                    element_type,
+                    self.high(a),
+                ),
             )
         }
     }
 
     pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
         let a_type = a.get_type();
-        if self.is_native_int_type(a_type) {
-            self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
-        }
-        else {
+        if self.is_native_int_type(a_type) || a_type.is_vector() {
+            self.cx.context.new_unary_op(self.location, UnaryOp::Minus, a.get_type(), a)
+        } else {
             self.gcc_add(self.gcc_not(a), self.gcc_int(a_type, 1))
         }
     }
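
The non-native fallback in `gcc_neg` relies on the two's-complement identity `-a == !a + 1`; a one-line check on plain integers:

// The two's-complement identity used by the non-native path of `gcc_neg`.
fn main() {
    for a in [0i64, 1, -1, 42, i64::MIN + 1] {
        assert_eq!(-a, (!a).wrapping_add(1));
    }
}
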
 
     pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
+        self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b, self.location)
     }
 
     pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -69,20 +80,16 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
             // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
             if a_type.is_signed(self) != b_type.is_signed(self) {
-                let b = self.context.new_cast(None, b, a_type);
+                let b = self.context.new_cast(self.location, b, a_type);
                 a >> b
-            }
-            else {
+            } else {
                 a >> b
             }
-        }
-        else if a_type.is_vector() && a_type.is_vector() {
+        } else if a_type.is_vector() && a_type.is_vector() {
             a >> b
-        }
-        else if a_native && !b_native {
+        } else if a_native && !b_native {
             self.gcc_lshr(a, self.gcc_int_cast(b, a_type))
-        }
-        else {
+        } else {
             // NOTE: we cannot use the lshr builtin because it's calling hi() (to get the most
             // significant half of the number) which uses lshr.
 
@@ -95,46 +102,38 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let b0_block = func.new_block("b0");
             let actual_else_block = func.new_block("actual_else");
 
-            let result = func.new_local(None, a_type, "shiftResult");
+            let result = func.new_local(self.location, a_type, "shiftResult");
 
             let sixty_four = self.gcc_int(native_int_type, 64);
             let sixty_three = self.gcc_int(native_int_type, 63);
             let zero = self.gcc_zero(native_int_type);
             let b = self.gcc_int_cast(b, native_int_type);
             let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
-            self.llbb().end_with_conditional(None, condition, then_block, else_block);
+            self.llbb().end_with_conditional(self.location, condition, then_block, else_block);
 
             let shift_value = self.gcc_sub(b, sixty_four);
             let high = self.high(a);
-            let sign =
-                if a_type.is_signed(self) {
-                    high >> sixty_three
-                }
-                else {
-                    zero
-                };
+            let sign = if a_type.is_signed(self) { high >> sixty_three } else { zero };
             let array_value = self.from_low_high_rvalues(a_type, high >> shift_value, sign);
-            then_block.add_assignment(None, result, array_value);
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(self.location, result, array_value);
+            then_block.end_with_jump(self.location, after_block);
 
             let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
-            else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+            else_block.end_with_conditional(self.location, condition, b0_block, actual_else_block);
 
-            b0_block.add_assignment(None, result, a);
-            b0_block.end_with_jump(None, after_block);
+            b0_block.add_assignment(self.location, result, a);
+            b0_block.end_with_jump(self.location, after_block);
 
             let shift_value = self.gcc_sub(sixty_four, b);
             // NOTE: cast low to its unsigned type in order to perform a logical right shift.
             let unsigned_type = native_int_type.to_unsigned(&self.cx);
-            let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
-            let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
-            let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
-            let array_value = self.from_low_high_rvalues(a_type,
-                (high << shift_value) | shifted_low,
-                high >> b,
-            );
-            actual_else_block.add_assignment(None, result, array_value);
-            actual_else_block.end_with_jump(None, after_block);
+            let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
+            let shifted_low = casted_low >> self.context.new_cast(self.location, b, unsigned_type);
+            let shifted_low = self.context.new_cast(self.location, shifted_low, native_int_type);
+            let array_value =
+                self.from_low_high_rvalues(a_type, (high << shift_value) | shifted_low, high >> b);
+            actual_else_block.add_assignment(self.location, result, array_value);
+            actual_else_block.end_with_jump(self.location, after_block);
 
             // NOTE: since jumps were added in a place rustc does not expect, the current block in the
             // state needs to be updated.
@@ -144,38 +143,49 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         }
     }
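
The branchy double-word shift above follows the standard recipe: shift amounts with bit 6 set (64..=127) move the high half into the low half, a zero shift is a no-op, and otherwise high bits spill into the low half. A self-contained sketch of the logical (unsigned) variant on `(low, high)` pairs of `u64`:

// Logical right shift of a 128-bit value stored as (low, high) u64 halves,
// mirroring the three branches built in `gcc_lshr` above.
fn lshr128(low: u64, high: u64, b: u32) -> (u64, u64) {
    if b & 64 != 0 {
        // Shift of 64..=127: the high half, shifted, becomes the low half.
        (high >> (b - 64), 0)
    } else if b == 0 {
        (low, high)
    } else {
        // Shift of 1..=63: bits of the high half spill into the low half.
        ((high << (64 - b)) | (low >> b), high >> b)
    }
}

fn main() {
    let v = 0x1234_5678_9abc_def0_u64;
    assert_eq!(lshr128(0, v, 64), (v, 0));
    assert_eq!(lshr128(u64::MAX, 1, 4), (0x1fff_ffff_ffff_ffff, 0));
}
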
 
-    fn additive_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+    fn additive_operation(
+        &self,
+        operation: BinaryOp,
+        a: RValue<'gcc>,
+        mut b: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         let a_type = a.get_type();
         let b_type = b.get_type();
-        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)) || (a_type.is_vector() && b_type.is_vector()) {
+        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type))
+            || (a_type.is_vector() && b_type.is_vector())
+        {
             if a_type != b_type {
                 if a_type.is_vector() {
                     // Vector types need to be bitcast.
                     // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
-                    b = self.context.new_bitcast(None, b, a.get_type());
-                }
-                else {
-                    b = self.context.new_cast(None, b, a.get_type());
+                    b = self.context.new_bitcast(self.location, b, a.get_type());
+                } else {
+                    b = self.context.new_cast(self.location, b, a.get_type());
                 }
             }
-            self.context.new_binary_op(None, operation, a_type, a, b)
-        }
-        else {
+            self.context.new_binary_op(self.location, operation, a_type, a, b)
+        } else {
             debug_assert!(a_type.dyncast_array().is_some());
             debug_assert!(b_type.dyncast_array().is_some());
             let signed = a_type.is_compatible_with(self.i128_type);
-            let func_name =
-                match (operation, signed) {
-                    (BinaryOp::Plus, true) => "__rust_i128_add",
-                    (BinaryOp::Plus, false) => "__rust_u128_add",
-                    (BinaryOp::Minus, true) => "__rust_i128_sub",
-                    (BinaryOp::Minus, false) => "__rust_u128_sub",
-                    _ => unreachable!("unexpected additive operation {:?}", operation),
-                };
-            let param_a = self.context.new_parameter(None, a_type, "a");
-            let param_b = self.context.new_parameter(None, b_type, "b");
-            let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
-            self.context.new_call(None, func, &[a, b])
+            let func_name = match (operation, signed) {
+                (BinaryOp::Plus, true) => "__rust_i128_add",
+                (BinaryOp::Plus, false) => "__rust_u128_add",
+                (BinaryOp::Minus, true) => "__rust_i128_sub",
+                (BinaryOp::Minus, false) => "__rust_u128_sub",
+                _ => unreachable!("unexpected additive operation {:?}", operation),
+            };
+            let param_a = self.context.new_parameter(self.location, a_type, "a");
+            let param_b = self.context.new_parameter(self.location, b_type, "b");
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                a_type,
+                &[param_a, param_b],
+                func_name,
+                false,
+            );
+            self.context.new_call(self.location, func, &[a, b])
         }
     }
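
What the `__rust_u128_add` libcall used above computes, sketched on `(low, high)` halves with an explicit carry (purely illustrative; the real symbol is provided by the Rust runtime):

// 128-bit wrapping addition on (low, high) u64 halves, the operation that
// the `__rust_u128_add` libcall performs on non-native targets.
fn add128(a: (u64, u64), b: (u64, u64)) -> (u64, u64) {
    let (low, carry) = a.0.overflowing_add(b.0);
    let high = a.1.wrapping_add(b.1).wrapping_add(carry as u64);
    (low, high)
}

fn main() {
    // u64::MAX + 1 carries into the high half.
    assert_eq!(add128((u64::MAX, 0), (1, 0)), (0, 1));
}
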
 
@@ -191,27 +201,36 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.additive_operation(BinaryOp::Minus, a, b)
     }
 
-    fn multiplicative_operation(&self, operation: BinaryOp, operation_name: &str, signed: bool, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
+    fn multiplicative_operation(
+        &self,
+        operation: BinaryOp,
+        operation_name: &str,
+        signed: bool,
+        a: RValue<'gcc>,
+        b: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         let a_type = a.get_type();
         let b_type = b.get_type();
-        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)) || (a_type.is_vector() && b_type.is_vector()) {
-            self.context.new_binary_op(None, operation, a_type, a, b)
-        }
-        else {
+        if (self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type))
+            || (a_type.is_vector() && b_type.is_vector())
+        {
+            self.context.new_binary_op(self.location, operation, a_type, a, b)
+        } else {
             debug_assert!(a_type.dyncast_array().is_some());
             debug_assert!(b_type.dyncast_array().is_some());
-            let sign =
-                if signed {
-                    ""
-                }
-                else {
-                    "u"
-                };
+            let sign = if signed { "" } else { "u" };
             let func_name = format!("__{}{}ti3", sign, operation_name);
-            let param_a = self.context.new_parameter(None, a_type, "a");
-            let param_b = self.context.new_parameter(None, b_type, "b");
-            let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
-            self.context.new_call(None, func, &[a, b])
+            let param_a = self.context.new_parameter(self.location, a_type, "a");
+            let param_b = self.context.new_parameter(self.location, b_type, "b");
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                a_type,
+                &[param_a, param_b],
+                func_name,
+                false,
+            );
+            self.context.new_call(self.location, func, &[a, b])
         }
     }
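
`multiplicative_operation` derives its libcall names from the compiler-rt scheme `__<sign><op>ti3`, where `ti` is GCC's name for the 128-bit integer mode; a quick check:

// The compiler-rt naming scheme used by `multiplicative_operation`:
// "__multi3", "__udivti3", "__umodti3", ... ("ti" = 128-bit integer mode).
fn ti3_libcall(operation_name: &str, signed: bool) -> String {
    let sign = if signed { "" } else { "u" };
    format!("__{}{}ti3", sign, operation_name)
}

fn main() {
    assert_eq!(ti3_libcall("mul", true), "__multi3");
    assert_eq!(ti3_libcall("div", false), "__udivti3");
}
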
 
@@ -227,137 +246,133 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.multiplicative_operation(BinaryOp::Divide, "div", false, a, b)
     }
 
-    pub fn gcc_checked_binop(&self, oop: OverflowOp, typ: Ty<'_>, lhs: <Self as BackendTypes>::Value, rhs: <Self as BackendTypes>::Value) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
+    pub fn gcc_checked_binop(
+        &self,
+        oop: OverflowOp,
+        typ: Ty<'_>,
+        lhs: <Self as BackendTypes>::Value,
+        rhs: <Self as BackendTypes>::Value,
+    ) -> (<Self as BackendTypes>::Value, <Self as BackendTypes>::Value) {
         use rustc_middle::ty::{Int, IntTy::*, Uint, UintTy::*};
 
-        let new_kind =
-            match typ.kind() {
-                Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
-                Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
-                t @ (Uint(_) | Int(_)) => t.clone(),
-                _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
-            };
+        let new_kind = match typ.kind() {
+            Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)),
+            Uint(t @ Usize) => Uint(t.normalize(self.tcx.sess.target.pointer_width)),
+            t @ (Uint(_) | Int(_)) => t.clone(),
+            _ => panic!("tried to get overflow intrinsic for op applied to non-int type"),
+        };
 
         // TODO(antoyo): remove duplication with intrinsic?
-        let name =
-            if self.is_native_int_type(lhs.get_type()) {
-                match oop {
-                    OverflowOp::Add =>
-                        match new_kind {
-                            Int(I8) => "__builtin_add_overflow",
-                            Int(I16) => "__builtin_add_overflow",
-                            Int(I32) => "__builtin_sadd_overflow",
-                            Int(I64) => "__builtin_saddll_overflow",
-                            Int(I128) => "__builtin_add_overflow",
-
-                            Uint(U8) => "__builtin_add_overflow",
-                            Uint(U16) => "__builtin_add_overflow",
-                            Uint(U32) => "__builtin_uadd_overflow",
-                            Uint(U64) => "__builtin_uaddll_overflow",
-                            Uint(U128) => "__builtin_add_overflow",
-
+        let name = if self.is_native_int_type(lhs.get_type()) {
+            match oop {
+                OverflowOp::Add => match new_kind {
+                    Int(I8) => "__builtin_add_overflow",
+                    Int(I16) => "__builtin_add_overflow",
+                    Int(I32) => "__builtin_sadd_overflow",
+                    Int(I64) => "__builtin_saddll_overflow",
+                    Int(I128) => "__builtin_add_overflow",
+
+                    Uint(U8) => "__builtin_add_overflow",
+                    Uint(U16) => "__builtin_add_overflow",
+                    Uint(U32) => "__builtin_uadd_overflow",
+                    Uint(U64) => "__builtin_uaddll_overflow",
+                    Uint(U128) => "__builtin_add_overflow",
+
+                    _ => unreachable!(),
+                },
+                OverflowOp::Sub => match new_kind {
+                    Int(I8) => "__builtin_sub_overflow",
+                    Int(I16) => "__builtin_sub_overflow",
+                    Int(I32) => "__builtin_ssub_overflow",
+                    Int(I64) => "__builtin_ssubll_overflow",
+                    Int(I128) => "__builtin_sub_overflow",
+
+                    Uint(U8) => "__builtin_sub_overflow",
+                    Uint(U16) => "__builtin_sub_overflow",
+                    Uint(U32) => "__builtin_usub_overflow",
+                    Uint(U64) => "__builtin_usubll_overflow",
+                    Uint(U128) => "__builtin_sub_overflow",
+
+                    _ => unreachable!(),
+                },
+                OverflowOp::Mul => match new_kind {
+                    Int(I8) => "__builtin_mul_overflow",
+                    Int(I16) => "__builtin_mul_overflow",
+                    Int(I32) => "__builtin_smul_overflow",
+                    Int(I64) => "__builtin_smulll_overflow",
+                    Int(I128) => "__builtin_mul_overflow",
+
+                    Uint(U8) => "__builtin_mul_overflow",
+                    Uint(U16) => "__builtin_mul_overflow",
+                    Uint(U32) => "__builtin_umul_overflow",
+                    Uint(U64) => "__builtin_umulll_overflow",
+                    Uint(U128) => "__builtin_mul_overflow",
+
+                    _ => unreachable!(),
+                },
+            }
+        } else {
+            match new_kind {
+                Int(I128) | Uint(U128) => {
+                    let func_name = match oop {
+                        OverflowOp::Add => match new_kind {
+                            Int(I128) => "__rust_i128_addo",
+                            Uint(U128) => "__rust_u128_addo",
                             _ => unreachable!(),
                         },
-                    OverflowOp::Sub =>
-                        match new_kind {
-                            Int(I8) => "__builtin_sub_overflow",
-                            Int(I16) => "__builtin_sub_overflow",
-                            Int(I32) => "__builtin_ssub_overflow",
-                            Int(I64) => "__builtin_ssubll_overflow",
-                            Int(I128) => "__builtin_sub_overflow",
-
-                            Uint(U8) => "__builtin_sub_overflow",
-                            Uint(U16) => "__builtin_sub_overflow",
-                            Uint(U32) => "__builtin_usub_overflow",
-                            Uint(U64) => "__builtin_usubll_overflow",
-                            Uint(U128) => "__builtin_sub_overflow",
-
+                        OverflowOp::Sub => match new_kind {
+                            Int(I128) => "__rust_i128_subo",
+                            Uint(U128) => "__rust_u128_subo",
                             _ => unreachable!(),
                         },
-                    OverflowOp::Mul =>
-                        match new_kind {
-                            Int(I8) => "__builtin_mul_overflow",
-                            Int(I16) => "__builtin_mul_overflow",
-                            Int(I32) => "__builtin_smul_overflow",
-                            Int(I64) => "__builtin_smulll_overflow",
-                            Int(I128) => "__builtin_mul_overflow",
-
-                            Uint(U8) => "__builtin_mul_overflow",
-                            Uint(U16) => "__builtin_mul_overflow",
-                            Uint(U32) => "__builtin_umul_overflow",
-                            Uint(U64) => "__builtin_umulll_overflow",
-                            Uint(U128) => "__builtin_mul_overflow",
-
+                        OverflowOp::Mul => match new_kind {
+                            Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4 instead?
+                            Uint(U128) => "__rust_u128_mulo",
                             _ => unreachable!(),
                         },
+                    };
+                    return self.operation_with_overflow(func_name, lhs, rhs);
                 }
-            }
-            else {
-                match new_kind {
-                    Int(I128) | Uint(U128) => {
-                        let func_name =
-                            match oop {
-                                OverflowOp::Add =>
-                                    match new_kind {
-                                        Int(I128) => "__rust_i128_addo",
-                                        Uint(U128) => "__rust_u128_addo",
-                                        _ => unreachable!(),
-                                    },
-                                OverflowOp::Sub =>
-                                    match new_kind {
-                                        Int(I128) => "__rust_i128_subo",
-                                        Uint(U128) => "__rust_u128_subo",
-                                        _ => unreachable!(),
-                                    },
-                                OverflowOp::Mul =>
-                                    match new_kind {
-                                        Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead?
-                                        Uint(U128) => "__rust_u128_mulo",
-                                        _ => unreachable!(),
-                                    },
-                            };
-                        return self.operation_with_overflow(func_name, lhs, rhs);
+                _ => match oop {
+                    OverflowOp::Mul => match new_kind {
+                        Int(I32) => "__mulosi4",
+                        Int(I64) => "__mulodi4",
+                        _ => unreachable!(),
                     },
-                    _ => {
-                        match oop {
-                            OverflowOp::Mul =>
-                                match new_kind {
-                                    Int(I32) => "__mulosi4",
-                                    Int(I64) => "__mulodi4",
-                                    _ => unreachable!(),
-                                },
-                            _ => unimplemented!("overflow operation for {:?}", new_kind),
-                        }
-                    }
-                }
-            };
+                    _ => unimplemented!("overflow operation for {:?}", new_kind),
+                },
+            }
+        };
 
         let intrinsic = self.context.get_builtin_function(&name);
-        let res = self.current_func()
+        let res = self
+            .current_func()
             // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
-            .new_local(None, rhs.get_type(), "binopResult")
-            .get_address(None);
+            .new_local(self.location, rhs.get_type(), "binopResult")
+            .get_address(self.location);
         let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
-        (res.dereference(None).to_rvalue(), overflow)
+        (res.dereference(self.location).to_rvalue(), overflow)
     }
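
The `__builtin_*_overflow` intrinsics chosen above return the wrapped result through an out-pointer plus a boolean overflow flag, which is the same contract as Rust's `overflowing_*` family; a sketch of the pattern:

// Same (result, overflowed) contract as the __builtin_*_overflow intrinsics
// selected in `gcc_checked_binop`.
fn checked_add_i32(lhs: i32, rhs: i32) -> (i32, bool) {
    lhs.overflowing_add(rhs)
}

fn main() {
    assert_eq!(checked_add_i32(i32::MAX, 1), (i32::MIN, true));
    assert_eq!(checked_add_i32(1, 2), (3, false));
}
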
 
-    pub fn operation_with_overflow(&self, func_name: &str, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
+    pub fn operation_with_overflow(
+        &self,
+        func_name: &str,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+    ) -> (RValue<'gcc>, RValue<'gcc>) {
         let a_type = lhs.get_type();
         let b_type = rhs.get_type();
         debug_assert!(a_type.dyncast_array().is_some());
         debug_assert!(b_type.dyncast_array().is_some());
-        let param_a = self.context.new_parameter(None, a_type, "a");
-        let param_b = self.context.new_parameter(None, b_type, "b");
-        let result_field = self.context.new_field(None, a_type, "result");
-        let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+        let param_a = self.context.new_parameter(self.location, a_type, "a");
+        let param_b = self.context.new_parameter(self.location, b_type, "b");
+        let result_field = self.context.new_field(self.location, a_type, "result");
+        let overflow_field = self.context.new_field(self.location, self.bool_type, "overflow");
 
         let ret_ty = Ty::new_tup(self.tcx, &[self.tcx.types.i128, self.tcx.types.bool]);
         let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ret_ty)).unwrap();
 
-        let arg_abi = ArgAbi {
-            layout,
-            mode: PassMode::Direct(ArgAttributes::new()),
-        };
+        let arg_abi = ArgAbi { layout, mode: PassMode::Direct(ArgAttributes::new()) };
         let mut fn_abi = FnAbi {
             args: vec![arg_abi.clone(), arg_abi.clone()].into_boxed_slice(),
             ret: arg_abi,
@@ -366,38 +381,66 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             conv: Conv::C,
             can_unwind: false,
         };
-        fn_abi.adjust_for_foreign_abi(self.cx, spec::abi::Abi::C {
-            unwind: false,
-        }).unwrap();
+        fn_abi.adjust_for_foreign_abi(self.cx, spec::abi::Abi::C { unwind: false }).unwrap();
 
         let indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
 
-        let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
-        let result =
-            if indirect {
-                let return_value = self.current_func().new_local(None, return_type.as_type(), "return_value");
-                let return_param_type = return_type.as_type().make_pointer();
-                let return_param = self.context.new_parameter(None, return_param_type, "return_value");
-                let func = self.context.new_function(None, FunctionType::Extern, self.type_void(), &[return_param, param_a, param_b], func_name, false);
-                self.llbb().add_eval(None, self.context.new_call(None, func, &[return_value.get_address(None), lhs, rhs]));
-                return_value.to_rvalue()
-            }
-            else {
-                let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
-                self.context.new_call(None, func, &[lhs, rhs])
-            };
-        let overflow = result.access_field(None, overflow_field);
-        let int_result = result.access_field(None, result_field);
-        return (int_result, overflow);
+        let return_type = self.context.new_struct_type(
+            self.location,
+            "result_overflow",
+            &[result_field, overflow_field],
+        );
+        let result = if indirect {
+            let return_value =
+                self.current_func().new_local(self.location, return_type.as_type(), "return_value");
+            let return_param_type = return_type.as_type().make_pointer();
+            let return_param =
+                self.context.new_parameter(self.location, return_param_type, "return_value");
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                self.type_void(),
+                &[return_param, param_a, param_b],
+                func_name,
+                false,
+            );
+            self.llbb().add_eval(
+                self.location,
+                self.context.new_call(
+                    self.location,
+                    func,
+                    &[return_value.get_address(self.location), lhs, rhs],
+                ),
+            );
+            return_value.to_rvalue()
+        } else {
+            let func = self.context.new_function(
+                self.location,
+                FunctionType::Extern,
+                return_type.as_type(),
+                &[param_a, param_b],
+                func_name,
+                false,
+            );
+            self.context.new_call(self.location, func, &[lhs, rhs])
+        };
+        let overflow = result.access_field(self.location, overflow_field);
+        let int_result = result.access_field(self.location, result_field);
+        (int_result, overflow)
     }
 
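A rough picture of the helper signature operation_with_overflow constructs: a two-field struct returned either directly or via an out-pointer, depending on what adjust_for_foreign_abi decides for the target. The declarations below are a sketch following the field names above, not exact libgccjit output:

    #[repr(C)]
    struct ResultOverflow {
        result: i128,
        overflow: bool,
    }

    extern "C" {
        // Direct return (PassMode::Direct). Under PassMode::Indirect the helper
        // instead takes a *mut ResultOverflow out-parameter before `a` and `b`.
        fn __rust_i128_mulo(a: i128, b: i128) -> ResultOverflow;
    }
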
-    pub fn gcc_icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn gcc_icmp(
+        &mut self,
+        op: IntPredicate,
+        mut lhs: RValue<'gcc>,
+        mut rhs: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         let a_type = lhs.get_type();
         let b_type = rhs.get_type();
         if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
             // This algorithm is based on compiler-rt's __cmpti2:
             // https://github.com/llvm-mirror/compiler-rt/blob/f0745e8476f069296a7c71accedd061dce4cdf79/lib/builtins/cmpti2.c#L21
-            let result = self.current_func().new_local(None, self.int_type, "icmp_result");
+            let result = self.current_func().new_local(self.location, self.int_type, "icmp_result");
             let block1 = self.current_func().new_block("block1");
             let block2 = self.current_func().new_block("block2");
             let block3 = self.current_func().new_block("block3");
@@ -413,92 +456,149 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // the sign is only on high).
             let unsigned_type = native_int_type.to_unsigned(&self.cx);
 
-            let lhs_low = self.context.new_cast(None, self.low(lhs), unsigned_type);
-            let rhs_low = self.context.new_cast(None, self.low(rhs), unsigned_type);
+            let lhs_low = self.context.new_cast(self.location, self.low(lhs), unsigned_type);
+            let rhs_low = self.context.new_cast(self.location, self.low(rhs), unsigned_type);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::LessThan, self.high(lhs), self.high(rhs));
-            self.llbb().end_with_conditional(None, condition, block1, block2);
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::LessThan,
+                self.high(lhs),
+                self.high(rhs),
+            );
+            self.llbb().end_with_conditional(self.location, condition, block1, block2);
 
-            block1.add_assignment(None, result, self.context.new_rvalue_zero(self.int_type));
-            block1.end_with_jump(None, after);
+            block1.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_zero(self.int_type),
+            );
+            block1.end_with_jump(self.location, after);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::GreaterThan, self.high(lhs), self.high(rhs));
-            block2.end_with_conditional(None, condition, block3, block4);
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::GreaterThan,
+                self.high(lhs),
+                self.high(rhs),
+            );
+            block2.end_with_conditional(self.location, condition, block3, block4);
 
-            block3.add_assignment(None, result, self.context.new_rvalue_from_int(self.int_type, 2));
-            block3.end_with_jump(None, after);
+            block3.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_from_int(self.int_type, 2),
+            );
+            block3.end_with_jump(self.location, after);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::LessThan, lhs_low, rhs_low);
-            block4.end_with_conditional(None, condition, block5, block6);
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::LessThan,
+                lhs_low,
+                rhs_low,
+            );
+            block4.end_with_conditional(self.location, condition, block5, block6);
 
-            block5.add_assignment(None, result, self.context.new_rvalue_zero(self.int_type));
-            block5.end_with_jump(None, after);
+            block5.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_zero(self.int_type),
+            );
+            block5.end_with_jump(self.location, after);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::GreaterThan, lhs_low, rhs_low);
-            block6.end_with_conditional(None, condition, block7, block8);
+            let condition = self.context.new_comparison(
+                self.location,
+                ComparisonOp::GreaterThan,
+                lhs_low,
+                rhs_low,
+            );
+            block6.end_with_conditional(self.location, condition, block7, block8);
 
-            block7.add_assignment(None, result, self.context.new_rvalue_from_int(self.int_type, 2));
-            block7.end_with_jump(None, after);
+            block7.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_from_int(self.int_type, 2),
+            );
+            block7.end_with_jump(self.location, after);
 
-            block8.add_assignment(None, result, self.context.new_rvalue_one(self.int_type));
-            block8.end_with_jump(None, after);
+            block8.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_one(self.int_type),
+            );
+            block8.end_with_jump(self.location, after);
 
             // NOTE: since jumps were added in a place rustc does not expect, the current block in the
             // state needs to be updated.
             self.switch_to_block(after);
 
             let cmp = result.to_rvalue();
-            let (op, limit) =
-                match op {
-                    IntPredicate::IntEQ => {
-                        return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
-                    },
-                    IntPredicate::IntNE => {
-                        return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
-                    },
-                    // TODO(antoyo): cast to u128 for unsigned comparison. See below.
-                    IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
-                    IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
-                    IntPredicate::IntULT => (ComparisonOp::Equals, 0),
-                    IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
-                    IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
-                    IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
-                    IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
-                    IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
-                };
-            self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
-        }
-        else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() {
+            let (op, limit) = match op {
+                IntPredicate::IntEQ => {
+                    return self.context.new_comparison(
+                        self.location,
+                        ComparisonOp::Equals,
+                        cmp,
+                        self.context.new_rvalue_one(self.int_type),
+                    );
+                }
+                IntPredicate::IntNE => {
+                    return self.context.new_comparison(
+                        self.location,
+                        ComparisonOp::NotEquals,
+                        cmp,
+                        self.context.new_rvalue_one(self.int_type),
+                    );
+                }
+                // TODO(antoyo): cast to u128 for unsigned comparison. See below.
+                IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
+                IntPredicate::IntUGE => (ComparisonOp::GreaterThanEquals, 1),
+                IntPredicate::IntULT => (ComparisonOp::Equals, 0),
+                IntPredicate::IntULE => (ComparisonOp::LessThanEquals, 1),
+                IntPredicate::IntSGT => (ComparisonOp::Equals, 2),
+                IntPredicate::IntSGE => (ComparisonOp::GreaterThanEquals, 1),
+                IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
+                IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
+            };
+            self.context.new_comparison(
+                self.location,
+                op,
+                cmp,
+                self.context.new_rvalue_from_int(self.int_type, limit),
+            )
+        } else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() {
             // NOTE: gcc cannot compare pointers to different objects, but rustc does that, so cast them to usize.
-            lhs = self.context.new_bitcast(None, lhs, self.usize_type);
-            rhs = self.context.new_bitcast(None, rhs, self.usize_type);
-            self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
-        }
-        else {
+            lhs = self.context.new_bitcast(self.location, lhs, self.usize_type);
+            rhs = self.context.new_bitcast(self.location, rhs, self.usize_type);
+            self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
+        } else {
             if a_type != b_type {
                 // NOTE: because libgccjit cannot compare function pointers.
-                if a_type.dyncast_function_ptr_type().is_some() && b_type.dyncast_function_ptr_type().is_some() {
-                    lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
-                    rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
+                if a_type.dyncast_function_ptr_type().is_some()
+                    && b_type.dyncast_function_ptr_type().is_some()
+                {
+                    lhs = self.context.new_cast(self.location, lhs, self.usize_type.make_pointer());
+                    rhs = self.context.new_cast(self.location, rhs, self.usize_type.make_pointer());
                 }
                 // NOTE: hack because we try to cast a vector type to the same vector type.
                 else if format!("{:?}", a_type) != format!("{:?}", b_type) {
-                    rhs = self.context.new_cast(None, rhs, a_type);
+                    rhs = self.context.new_cast(self.location, rhs, a_type);
                 }
             }
             match op {
-                IntPredicate::IntUGT | IntPredicate::IntUGE | IntPredicate::IntULT | IntPredicate::IntULE => {
+                IntPredicate::IntUGT
+                | IntPredicate::IntUGE
+                | IntPredicate::IntULT
+                | IntPredicate::IntULE => {
                     if !a_type.is_vector() {
                         let unsigned_type = a_type.to_unsigned(&self.cx);
-                        lhs = self.context.new_cast(None, lhs, unsigned_type);
-                        rhs = self.context.new_cast(None, rhs, unsigned_type);
+                        lhs = self.context.new_cast(self.location, lhs, unsigned_type);
+                        rhs = self.context.new_cast(self.location, rhs, unsigned_type);
                     }
-                },
+                }
                 // TODO(antoyo): we probably need to handle signed comparison for unsigned
                 // integers.
                 _ => (),
             }
-            self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+            self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
         }
     }
 
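The block graph above is a branchy encoding of compiler-rt's __cmpti2: compare the signed high halves first, then the unsigned low halves, yielding 0 (less), 1 (equal) or 2 (greater). A straight-line Rust sketch of the same logic:

    // Three-way compare of two i128 values split into 64-bit halves.
    fn cmpti2(a: i128, b: i128) -> i32 {
        let (a_high, a_low) = ((a >> 64) as i64, a as u64);
        let (b_high, b_low) = ((b >> 64) as i64, b as u64);
        if a_high < b_high { return 0; }
        if a_high > b_high { return 2; }
        // High halves equal: the low halves compare unsigned.
        if a_low < b_low { return 0; }
        if a_low > b_low { return 2; }
        1
    }

The (op, limit) table above then reduces each IntPredicate to a single comparison against this 0/1/2 encoding.
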
@@ -508,12 +608,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if a_type.is_vector() && b_type.is_vector() {
             let b = self.bitcast_if_needed(b, a_type);
             a ^ b
-        }
-        else if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type) {
+        } else if self.is_native_int_type_or_bool(a_type) && self.is_native_int_type_or_bool(b_type)
+        {
             a ^ b
-        }
-        else {
-            self.from_low_high_rvalues(a_type,
+        } else {
+            self.from_low_high_rvalues(
+                a_type,
                 self.low(a) ^ self.low(b),
                 self.high(a) ^ self.high(b),
             )
@@ -528,25 +628,20 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if a_native && b_native {
             // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
             if a_type.is_unsigned(self) && b_type.is_signed(self) {
-                let a = self.context.new_cast(None, a, b_type);
+                let a = self.context.new_cast(self.location, a, b_type);
                 let result = a << b;
-                self.context.new_cast(None, result, a_type)
-            }
-            else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-                let b = self.context.new_cast(None, b, a_type);
+                self.context.new_cast(self.location, result, a_type)
+            } else if a_type.is_signed(self) && b_type.is_unsigned(self) {
+                let b = self.context.new_cast(self.location, b, a_type);
                 a << b
-            }
-            else {
+            } else {
                 a << b
             }
-        }
-        else if a_type.is_vector() && a_type.is_vector() {
+        } else if a_type.is_vector() && b_type.is_vector() {
             a << b
-        }
-        else if a_native && !b_native {
+        } else if a_native && !b_native {
             self.gcc_shl(a, self.gcc_int_cast(b, a_type))
-        }
-        else {
+        } else {
             // NOTE: we cannot use the ashl builtin because it's calling widen_hi() which uses ashl.
             let native_int_type = a_type.dyncast_array().expect("get element type");
 
@@ -557,40 +652,40 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let b0_block = func.new_block("b0");
             let actual_else_block = func.new_block("actual_else");
 
-            let result = func.new_local(None, a_type, "shiftResult");
+            let result = func.new_local(self.location, a_type, "shiftResult");
 
             let b = self.gcc_int_cast(b, native_int_type);
             let sixty_four = self.gcc_int(native_int_type, 64);
             let zero = self.gcc_zero(native_int_type);
             let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
-            self.llbb().end_with_conditional(None, condition, then_block, else_block);
+            self.llbb().end_with_conditional(self.location, condition, then_block, else_block);
 
-            let array_value = self.from_low_high_rvalues(a_type,
-                zero,
-                self.low(a) << (b - sixty_four),
-            );
-            then_block.add_assignment(None, result, array_value);
-            then_block.end_with_jump(None, after_block);
+            let array_value =
+                self.from_low_high_rvalues(a_type, zero, self.low(a) << (b - sixty_four));
+            then_block.add_assignment(self.location, result, array_value);
+            then_block.end_with_jump(self.location, after_block);
 
             let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
-            else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+            else_block.end_with_conditional(self.location, condition, b0_block, actual_else_block);
 
-            b0_block.add_assignment(None, result, a);
-            b0_block.end_with_jump(None, after_block);
+            b0_block.add_assignment(self.location, result, a);
+            b0_block.end_with_jump(self.location, after_block);
 
             // NOTE: cast low to its unsigned type in order to perform a logical right shift.
             // TODO(antoyo): adjust this ^ comment.
             let unsigned_type = native_int_type.to_unsigned(&self.cx);
-            let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
-            let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
-            let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
+            let casted_low = self.context.new_cast(self.location, self.low(a), unsigned_type);
+            let shift_value = self.context.new_cast(self.location, sixty_four - b, unsigned_type);
+            let high_low =
+                self.context.new_cast(self.location, casted_low >> shift_value, native_int_type);
 
-            let array_value = self.from_low_high_rvalues(a_type,
+            let array_value = self.from_low_high_rvalues(
+                a_type,
                 self.low(a) << b,
                 (self.high(a) << b) | high_low,
             );
-            actual_else_block.add_assignment(None, result, array_value);
-            actual_else_block.end_with_jump(None, after_block);
+            actual_else_block.add_assignment(self.location, result, array_value);
+            actual_else_block.end_with_jump(self.location, after_block);
 
             // NOTE: since jumps were added in a place rustc does not expect, the current block in the
             // state needs to be updated.
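What the then/b0/actual_else blocks above compute, written as a plain function over the two 64-bit halves (a sketch; the shift amount is assumed already reduced below 128):

    // 128-bit shift-left on (low, high) u64 halves.
    fn shl_halves(low: u64, high: u64, b: u32) -> (u64, u64) {
        if b & 64 != 0 {
            // Shift amount >= 64: the low half moves entirely into the high half.
            (0, low << (b - 64))
        } else if b == 0 {
            (low, high)
        } else {
            // Bits shifted out of the low half become the low bits of the high half.
            (low << b, (high << b) | (low >> (64 - b)))
        }
    }
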
@@ -606,10 +701,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let native_int_type = arg_type.dyncast_array().expect("get element type");
             let lsb = self.low(arg);
             let swapped_lsb = self.gcc_bswap(lsb, width / 2);
-            let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
+            let swapped_lsb = self.context.new_cast(self.location, swapped_lsb, native_int_type);
             let msb = self.high(arg);
             let swapped_msb = self.gcc_bswap(msb, width / 2);
-            let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
+            let swapped_msb = self.context.new_cast(self.location, swapped_msb, native_int_type);
 
             // NOTE: we also need to swap the two elements here, in addition to swapping inside
             // the elements themselves, as done above.
@@ -625,7 +720,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if param_type != arg_type {
             arg = self.bitcast(arg, param_type);
         }
-        self.cx.context.new_call(None, bswap, &[arg])
+        self.cx.context.new_call(self.location, bswap, &[arg])
     }
 }
 
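For the non-native width, gcc_bswap swaps the bytes within each 64-bit half and then exchanges the halves, since a full byte reversal of a 128-bit value does both. A sketch:

    // Byte-swap a u128 represented as (low, high) u64 halves.
    fn bswap_halves(low: u64, high: u64) -> (u64, u64) {
        // New low = swapped old high; new high = swapped old low.
        (high.swap_bytes(), low.swap_bytes())
    }
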
@@ -633,8 +728,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     pub fn gcc_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
         if self.is_native_int_type_or_bool(typ) {
             self.context.new_rvalue_from_long(typ, i64::try_from(int).expect("i64::try_from"))
-        }
-        else {
+        } else {
             // NOTE: set the sign in high.
             self.from_low_high(typ, int, -(int.is_negative() as i64))
         }
@@ -645,11 +739,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
             // FIXME(antoyo): libgccjit cannot create 128-bit values yet.
             let num = self.context.new_rvalue_from_long(self.u64_type, int as i64);
             self.gcc_int_cast(num, typ)
-        }
-        else if self.is_native_int_type_or_bool(typ) {
-            self.context.new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
-        }
-        else {
+        } else if self.is_native_int_type_or_bool(typ) {
+            self.context
+                .new_rvalue_from_long(typ, u64::try_from(int).expect("u64::try_from") as i64)
+        } else {
             self.from_low_high(typ, int as i64, 0)
         }
     }
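gcc_int and gcc_uint build non-native constants from two halves: signed values replicate the sign bit into the high half, unsigned values zero it. In plain Rust:

    // Sign-extended constant: high half is all ones iff the value is negative.
    fn signed_halves(int: i64) -> (i64, i64) {
        (int, -(int.is_negative() as i64))
    }

    // Zero-extended constant.
    fn unsigned_halves(int: u64) -> (i64, i64) {
        (int as i64, 0)
    }
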
@@ -666,17 +759,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
                 let sixty_four = self.context.new_rvalue_from_long(typ, 64);
                 let shift = high << sixty_four;
                 shift | self.context.new_cast(None, low, typ)
-            }
-            else {
+            } else {
                 self.from_low_high(typ, low as i64, high as i64)
             }
-        }
-        else if typ.is_i128(self) {
+        } else if typ.is_i128(self) {
             // FIXME(antoyo): libgccjit cannot create 128-bit values yet.
             let num = self.context.new_rvalue_from_long(self.u64_type, num as u64 as i64);
             self.gcc_int_cast(num, typ)
-        }
-        else {
+        } else {
             self.gcc_uint(typ, num as u64)
         }
     }
@@ -684,8 +774,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     pub fn gcc_zero(&self, typ: Type<'gcc>) -> RValue<'gcc> {
         if self.is_native_int_type_or_bool(typ) {
             self.context.new_rvalue_zero(typ)
-        }
-        else {
+        } else {
             self.from_low_high(typ, 0, 0)
         }
     }
@@ -693,64 +782,88 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     pub fn gcc_int_width(&self, typ: Type<'gcc>) -> u64 {
         if self.is_native_int_type_or_bool(typ) {
             typ.get_size() as u64 * 8
-        }
-        else {
+        } else {
             // NOTE: the only unsupported types are u128 and i128.
             128
         }
     }
 
-    fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+    fn bitwise_operation(
+        &self,
+        operation: BinaryOp,
+        a: RValue<'gcc>,
+        mut b: RValue<'gcc>,
+        loc: Option<Location<'gcc>>,
+    ) -> RValue<'gcc> {
         let a_type = a.get_type();
         let b_type = b.get_type();
         let a_native = self.is_native_int_type_or_bool(a_type);
         let b_native = self.is_native_int_type_or_bool(b_type);
         if a_type.is_vector() && b_type.is_vector() {
             let b = self.bitcast_if_needed(b, a_type);
-            self.context.new_binary_op(None, operation, a_type, a, b)
-        }
-        else if a_native && b_native {
+            self.context.new_binary_op(loc, operation, a_type, a, b)
+        } else if a_native && b_native {
             if a_type != b_type {
-                b = self.context.new_cast(None, b, a_type);
+                b = self.context.new_cast(loc, b, a_type);
             }
-            self.context.new_binary_op(None, operation, a_type, a, b)
-        }
-        else {
-            assert!(!a_native && !b_native, "both types should either be native or non-native for or operation");
+            self.context.new_binary_op(loc, operation, a_type, a, b)
+        } else {
+            assert!(
+                !a_native && !b_native,
+                "both types should either be native or non-native for this operation"
+            );
             let native_int_type = a_type.dyncast_array().expect("get element type");
-            self.from_low_high_rvalues(a_type,
-                self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
-                self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
+            self.from_low_high_rvalues(
+                a_type,
+                self.context.new_binary_op(
+                    loc,
+                    operation,
+                    native_int_type,
+                    self.low(a),
+                    self.low(b),
+                ),
+                self.context.new_binary_op(
+                    loc,
+                    operation,
+                    native_int_type,
+                    self.high(a),
+                    self.high(b),
+                ),
             )
         }
     }
 
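For non-native operands, bitwise_operation applies the operation to the low and high halves independently, which is sound because bitwise operations never carry between bit positions. A sketch for the OR case:

    // Bitwise OR over (low, high) halves; AND and XOR decompose the same way.
    fn or_halves(a: (u64, u64), b: (u64, u64)) -> (u64, u64) {
        (a.0 | b.0, a.1 | b.1)
    }
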
-    pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
+    pub fn gcc_or(
+        &self,
+        a: RValue<'gcc>,
+        b: RValue<'gcc>,
+        loc: Option<Location<'gcc>>,
+    ) -> RValue<'gcc> {
+        self.bitwise_operation(BinaryOp::BitwiseOr, a, b, loc)
     }
 
     // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
     pub fn gcc_int_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
         let value_type = value.get_type();
-        if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type) {
+        if self.is_native_int_type_or_bool(dest_typ) && self.is_native_int_type_or_bool(value_type)
+        {
             self.context.new_cast(None, value, dest_typ)
-        }
-        else if self.is_native_int_type_or_bool(dest_typ) {
+        } else if self.is_native_int_type_or_bool(dest_typ) {
             self.context.new_cast(None, self.low(value), dest_typ)
-        }
-        else if self.is_native_int_type_or_bool(value_type) {
+        } else if self.is_native_int_type_or_bool(value_type) {
             let dest_element_type = dest_typ.dyncast_array().expect("get element type");
 
             // NOTE: set the sign of the value.
             let zero = self.context.new_rvalue_zero(value_type);
-            let is_negative = self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
+            let is_negative =
+                self.context.new_comparison(None, ComparisonOp::LessThan, value, zero);
             let is_negative = self.gcc_int_cast(is_negative, dest_element_type);
-            self.from_low_high_rvalues(dest_typ,
+            self.from_low_high_rvalues(
+                dest_typ,
                 self.context.new_cast(None, value, dest_element_type),
                 self.context.new_unary_op(None, UnaryOp::Minus, dest_element_type, is_negative),
             )
-        }
-        else {
+        } else {
             // Since u128 and i128 are the only types that can be unsupported, we know the type of
             // the value and the destination type have the same size, so a bitcast is fine.
 
@@ -759,29 +872,34 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         }
     }
 
-    fn int_to_float_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+    fn int_to_float_cast(
+        &self,
+        signed: bool,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
         let value_type = value.get_type();
         if self.is_native_int_type_or_bool(value_type) {
             return self.context.new_cast(None, value, dest_typ);
         }
 
         debug_assert!(value_type.dyncast_array().is_some());
-        let name_suffix =
-            match self.type_kind(dest_typ) {
-                TypeKind::Float => "tisf",
-                TypeKind::Double => "tidf",
-                kind => panic!("cannot cast a non-native integer to type {:?}", kind),
-            };
-        let sign =
-            if signed {
-                ""
-            }
-            else {
-                "un"
-            };
+        let name_suffix = match self.type_kind(dest_typ) {
+            TypeKind::Float => "tisf",
+            TypeKind::Double => "tidf",
+            kind => panic!("cannot cast a non-native integer to type {:?}", kind),
+        };
+        let sign = if signed { "" } else { "un" };
         let func_name = format!("__float{}{}", sign, name_suffix);
         let param = self.context.new_parameter(None, value_type, "n");
-        let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+        let func = self.context.new_function(
+            None,
+            FunctionType::Extern,
+            dest_typ,
+            &[param],
+            func_name,
+            false,
+        );
         self.context.new_call(None, func, &[value])
     }
 
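The name assembled above selects a libgcc soft-conversion helper such as __floattisf or __floatuntidf. A small sketch of the construction:

    // Build the libgcc int-to-float helper name, e.g. "__floatuntisf".
    fn float_helper_name(signed: bool, dest_is_double: bool) -> String {
        let suffix = if dest_is_double { "tidf" } else { "tisf" };
        let sign = if signed { "" } else { "un" };
        format!("__float{}{}", sign, suffix)
    }
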
@@ -789,33 +907,42 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         self.int_to_float_cast(true, value, dest_typ)
     }
 
-    pub fn gcc_uint_to_float_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+    pub fn gcc_uint_to_float_cast(
+        &self,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
         self.int_to_float_cast(false, value, dest_typ)
     }
 
-    fn float_to_int_cast(&self, signed: bool, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+    fn float_to_int_cast(
+        &self,
+        signed: bool,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
         let value_type = value.get_type();
         if self.is_native_int_type_or_bool(dest_typ) {
             return self.context.new_cast(None, value, dest_typ);
         }
 
         debug_assert!(value_type.dyncast_array().is_some());
-        let name_suffix =
-            match self.type_kind(value_type) {
-                TypeKind::Float => "sfti",
-                TypeKind::Double => "dfti",
-                kind => panic!("cannot cast a {:?} to non-native integer", kind),
-            };
-        let sign =
-            if signed {
-                ""
-            }
-            else {
-                "uns"
-            };
+        let name_suffix = match self.type_kind(value_type) {
+            TypeKind::Float => "sfti",
+            TypeKind::Double => "dfti",
+            kind => panic!("cannot cast a {:?} to non-native integer", kind),
+        };
+        let sign = if signed { "" } else { "uns" };
         let func_name = format!("__fix{}{}", sign, name_suffix);
         let param = self.context.new_parameter(None, value_type, "n");
-        let func = self.context.new_function(None, FunctionType::Extern, dest_typ, &[param], func_name, false);
+        let func = self.context.new_function(
+            None,
+            FunctionType::Extern,
+            dest_typ,
+            &[param],
+            func_name,
+            false,
+        );
         self.context.new_call(None, func, &[value])
     }
 
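The float-to-int direction mirrors this with the __fix* family; note the unsigned marker is "uns" here (e.g. __fixunsdfti), not "un" as in the int-to-float direction:

    // Build the libgcc float-to-int helper name, e.g. "__fixunsdfti".
    fn fix_helper_name(signed: bool, src_is_double: bool) -> String {
        let suffix = if src_is_double { "dfti" } else { "sfti" };
        let sign = if signed { "" } else { "uns" };
        format!("__fix{}{}", sign, suffix)
    }
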
@@ -823,47 +950,54 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         self.float_to_int_cast(true, value, dest_typ)
     }
 
-    pub fn gcc_float_to_uint_cast(&self, value: RValue<'gcc>, dest_typ: Type<'gcc>) -> RValue<'gcc> {
+    pub fn gcc_float_to_uint_cast(
+        &self,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+    ) -> RValue<'gcc> {
         self.float_to_int_cast(false, value, dest_typ)
     }
 
     fn high(&self, value: RValue<'gcc>) -> RValue<'gcc> {
-        let index =
-            match self.sess().target.options.endian {
-                Endian::Little => 1,
-                Endian::Big => 0,
-            };
-        self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
+        let index = match self.sess().target.options.endian {
+            Endian::Little => 1,
+            Endian::Big => 0,
+        };
+        self.context
+            .new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
             .to_rvalue()
     }
 
     fn low(&self, value: RValue<'gcc>) -> RValue<'gcc> {
-        let index =
-            match self.sess().target.options.endian {
-                Endian::Little => 0,
-                Endian::Big => 1,
-            };
-        self.context.new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
+        let index = match self.sess().target.options.endian {
+            Endian::Little => 0,
+            Endian::Big => 1,
+        };
+        self.context
+            .new_array_access(None, value, self.context.new_rvalue_from_int(self.int_type, index))
             .to_rvalue()
     }
 
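high and low index into the two-element array backing a non-native integer, so which element is which depends only on the target's byte order. A sketch of the index selection:

    // (low_index, high_index) into the two-u64 representation of a 128-bit value.
    fn half_indices(little_endian: bool) -> (usize, usize) {
        if little_endian { (0, 1) } else { (1, 0) }
    }
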
-    fn from_low_high_rvalues(&self, typ: Type<'gcc>, low: RValue<'gcc>, high: RValue<'gcc>) -> RValue<'gcc> {
-        let (first, last) =
-            match self.sess().target.options.endian {
-                Endian::Little => (low, high),
-                Endian::Big => (high, low),
-            };
+    fn from_low_high_rvalues(
+        &self,
+        typ: Type<'gcc>,
+        low: RValue<'gcc>,
+        high: RValue<'gcc>,
+    ) -> RValue<'gcc> {
+        let (first, last) = match self.sess().target.options.endian {
+            Endian::Little => (low, high),
+            Endian::Big => (high, low),
+        };
 
         let values = [first, last];
         self.context.new_array_constructor(None, typ, &values)
     }
 
     fn from_low_high(&self, typ: Type<'gcc>, low: i64, high: i64) -> RValue<'gcc> {
-        let (first, last) =
-            match self.sess().target.options.endian {
-                Endian::Little => (low, high),
-                Endian::Big => (high, low),
-            };
+        let (first, last) = match self.sess().target.options.endian {
+            Endian::Little => (low, high),
+            Endian::Big => (high, low),
+        };
 
         let native_int_type = typ.dyncast_array().expect("get element type");
         let values = [
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
index 15d67385c3e..c4ae1751fa0 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/archs.rs
@@ -151,8 +151,10 @@ match name {
     "llvm.amdgcn.msad.u8" => "__builtin_amdgcn_msad_u8",
     "llvm.amdgcn.perm" => "__builtin_amdgcn_perm",
     "llvm.amdgcn.permlane16" => "__builtin_amdgcn_permlane16",
+    "llvm.amdgcn.permlane16.var" => "__builtin_amdgcn_permlane16_var",
     "llvm.amdgcn.permlane64" => "__builtin_amdgcn_permlane64",
     "llvm.amdgcn.permlanex16" => "__builtin_amdgcn_permlanex16",
+    "llvm.amdgcn.permlanex16.var" => "__builtin_amdgcn_permlanex16_var",
     "llvm.amdgcn.qsad.pk.u16.u8" => "__builtin_amdgcn_qsad_pk_u16_u8",
     "llvm.amdgcn.queue.ptr" => "__builtin_amdgcn_queue_ptr",
     "llvm.amdgcn.rcp.legacy" => "__builtin_amdgcn_rcp_legacy",
@@ -160,11 +162,20 @@ match name {
     "llvm.amdgcn.readlane" => "__builtin_amdgcn_readlane",
     "llvm.amdgcn.rsq.legacy" => "__builtin_amdgcn_rsq_legacy",
     "llvm.amdgcn.s.barrier" => "__builtin_amdgcn_s_barrier",
+    "llvm.amdgcn.s.barrier.init" => "__builtin_amdgcn_s_barrier_init",
+    "llvm.amdgcn.s.barrier.join" => "__builtin_amdgcn_s_barrier_join",
+    "llvm.amdgcn.s.barrier.leave" => "__builtin_amdgcn_s_barrier_leave",
+    "llvm.amdgcn.s.barrier.signal" => "__builtin_amdgcn_s_barrier_signal",
+    "llvm.amdgcn.s.barrier.signal.isfirst" => "__builtin_amdgcn_s_barrier_signal_isfirst",
+    "llvm.amdgcn.s.barrier.signal.isfirst.var" => "__builtin_amdgcn_s_barrier_signal_isfirst_var",
+    "llvm.amdgcn.s.barrier.signal.var" => "__builtin_amdgcn_s_barrier_signal_var",
+    "llvm.amdgcn.s.barrier.wait" => "__builtin_amdgcn_s_barrier_wait",
     "llvm.amdgcn.s.dcache.inv" => "__builtin_amdgcn_s_dcache_inv",
     "llvm.amdgcn.s.dcache.inv.vol" => "__builtin_amdgcn_s_dcache_inv_vol",
     "llvm.amdgcn.s.dcache.wb" => "__builtin_amdgcn_s_dcache_wb",
     "llvm.amdgcn.s.dcache.wb.vol" => "__builtin_amdgcn_s_dcache_wb_vol",
     "llvm.amdgcn.s.decperflevel" => "__builtin_amdgcn_s_decperflevel",
+    "llvm.amdgcn.s.get.barrier.state" => "__builtin_amdgcn_s_get_barrier_state",
     "llvm.amdgcn.s.get.waveid.in.workgroup" => "__builtin_amdgcn_s_get_waveid_in_workgroup",
     "llvm.amdgcn.s.getpc" => "__builtin_amdgcn_s_getpc",
     "llvm.amdgcn.s.getreg" => "__builtin_amdgcn_s_getreg",
@@ -176,8 +187,10 @@ match name {
     "llvm.amdgcn.s.setprio" => "__builtin_amdgcn_s_setprio",
     "llvm.amdgcn.s.setreg" => "__builtin_amdgcn_s_setreg",
     "llvm.amdgcn.s.sleep" => "__builtin_amdgcn_s_sleep",
+    "llvm.amdgcn.s.sleep.var" => "__builtin_amdgcn_s_sleep_var",
     "llvm.amdgcn.s.wait.event.export.ready" => "__builtin_amdgcn_s_wait_event_export_ready",
     "llvm.amdgcn.s.waitcnt" => "__builtin_amdgcn_s_waitcnt",
+    "llvm.amdgcn.s.wakeup.barrier" => "__builtin_amdgcn_s_wakeup_barrier",
     "llvm.amdgcn.sad.hi.u8" => "__builtin_amdgcn_sad_hi_u8",
     "llvm.amdgcn.sad.u16" => "__builtin_amdgcn_sad_u16",
     "llvm.amdgcn.sad.u8" => "__builtin_amdgcn_sad_u8",
@@ -314,6 +327,8 @@ match name {
     // bpf
     "llvm.bpf.btf.type.id" => "__builtin_bpf_btf_type_id",
     "llvm.bpf.compare" => "__builtin_bpf_compare",
+    "llvm.bpf.getelementptr.and.load" => "__builtin_bpf_getelementptr_and_load",
+    "llvm.bpf.getelementptr.and.store" => "__builtin_bpf_getelementptr_and_store",
     "llvm.bpf.load.byte" => "__builtin_bpf_load_byte",
     "llvm.bpf.load.half" => "__builtin_bpf_load_half",
     "llvm.bpf.load.word" => "__builtin_bpf_load_word",
@@ -5776,14 +5791,6 @@ match name {
     "llvm.s390.verimf" => "__builtin_s390_verimf",
     "llvm.s390.verimg" => "__builtin_s390_verimg",
     "llvm.s390.verimh" => "__builtin_s390_verimh",
-    "llvm.s390.verllb" => "__builtin_s390_verllb",
-    "llvm.s390.verllf" => "__builtin_s390_verllf",
-    "llvm.s390.verllg" => "__builtin_s390_verllg",
-    "llvm.s390.verllh" => "__builtin_s390_verllh",
-    "llvm.s390.verllvb" => "__builtin_s390_verllvb",
-    "llvm.s390.verllvf" => "__builtin_s390_verllvf",
-    "llvm.s390.verllvg" => "__builtin_s390_verllvg",
-    "llvm.s390.verllvh" => "__builtin_s390_verllvh",
     "llvm.s390.vfaeb" => "__builtin_s390_vfaeb",
     "llvm.s390.vfaef" => "__builtin_s390_vfaef",
     "llvm.s390.vfaeh" => "__builtin_s390_vfaeh",
@@ -5815,7 +5822,7 @@ match name {
     "llvm.s390.vistrh" => "__builtin_s390_vistrh",
     "llvm.s390.vlbb" => "__builtin_s390_vlbb",
     "llvm.s390.vll" => "__builtin_s390_vll",
-    "llvm.s390.vlrl" => "__builtin_s390_vlrl",
+    "llvm.s390.vlrl" => "__builtin_s390_vlrlr",
     "llvm.s390.vmaeb" => "__builtin_s390_vmaeb",
     "llvm.s390.vmaef" => "__builtin_s390_vmaef",
     "llvm.s390.vmaeh" => "__builtin_s390_vmaeh",
@@ -5885,7 +5892,7 @@ match name {
     "llvm.s390.vstrczb" => "__builtin_s390_vstrczb",
     "llvm.s390.vstrczf" => "__builtin_s390_vstrczf",
     "llvm.s390.vstrczh" => "__builtin_s390_vstrczh",
-    "llvm.s390.vstrl" => "__builtin_s390_vstrl",
+    "llvm.s390.vstrl" => "__builtin_s390_vstrlr",
     "llvm.s390.vsumb" => "__builtin_s390_vsumb",
     "llvm.s390.vsumgf" => "__builtin_s390_vsumgf",
     "llvm.s390.vsumgh" => "__builtin_s390_vsumgh",
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
index 35eb4a11005..ce8dee69a98 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs
@@ -3,94 +3,185 @@ use std::borrow::Cow;
 use gccjit::{Function, FunctionPtrType, RValue, ToRValue, UnaryOp};
 use rustc_codegen_ssa::traits::BuilderMethods;
 
-use crate::{context::CodegenCx, builder::Builder};
+use crate::{builder::Builder, context::CodegenCx};
 
-pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, gcc_func: FunctionPtrType<'gcc>, mut args: Cow<'b, [RValue<'gcc>]>, func_name: &str, original_function_name: Option<&String>) -> Cow<'b, [RValue<'gcc>]> {
+pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+    gcc_func: FunctionPtrType<'gcc>,
+    mut args: Cow<'b, [RValue<'gcc>]>,
+    func_name: &str,
+    original_function_name: Option<&String>,
+) -> Cow<'b, [RValue<'gcc>]> {
     // Some LLVM intrinsics do not map 1-to-1 to GCC intrinsics, so we add the missing
     // arguments here.
     if gcc_func.get_param_count() != args.len() {
         match &*func_name {
             // NOTE: the following intrinsics have a different number of parameters in LLVM and GCC.
-            "__builtin_ia32_prold512_mask" | "__builtin_ia32_pmuldq512_mask" | "__builtin_ia32_pmuludq512_mask"
-                | "__builtin_ia32_pmaxsd512_mask" | "__builtin_ia32_pmaxsq512_mask" | "__builtin_ia32_pmaxsq256_mask"
-                | "__builtin_ia32_pmaxsq128_mask" | "__builtin_ia32_pmaxud512_mask" | "__builtin_ia32_pmaxuq512_mask"
-                | "__builtin_ia32_pminsd512_mask" | "__builtin_ia32_pminsq512_mask" | "__builtin_ia32_pminsq256_mask"
-                | "__builtin_ia32_pminsq128_mask" | "__builtin_ia32_pminud512_mask" | "__builtin_ia32_pminuq512_mask"
-                | "__builtin_ia32_prolq512_mask" | "__builtin_ia32_prorq512_mask" | "__builtin_ia32_pslldi512_mask"
-                | "__builtin_ia32_psrldi512_mask" | "__builtin_ia32_psllqi512_mask" | "__builtin_ia32_psrlqi512_mask"
-                | "__builtin_ia32_pslld512_mask" | "__builtin_ia32_psrld512_mask" | "__builtin_ia32_psllq512_mask"
-                | "__builtin_ia32_psrlq512_mask" | "__builtin_ia32_psrad512_mask" | "__builtin_ia32_psraq512_mask"
-                | "__builtin_ia32_psradi512_mask" | "__builtin_ia32_psraqi512_mask" | "__builtin_ia32_psrav16si_mask"
-                | "__builtin_ia32_psrav8di_mask" | "__builtin_ia32_prolvd512_mask" | "__builtin_ia32_prorvd512_mask"
-                | "__builtin_ia32_prolvq512_mask" | "__builtin_ia32_prorvq512_mask" | "__builtin_ia32_psllv16si_mask"
-                | "__builtin_ia32_psrlv16si_mask" | "__builtin_ia32_psllv8di_mask" | "__builtin_ia32_psrlv8di_mask"
-                | "__builtin_ia32_permvarsi512_mask" | "__builtin_ia32_vpermilvarps512_mask"
-                | "__builtin_ia32_vpermilvarpd512_mask" | "__builtin_ia32_permvardi512_mask"
-                | "__builtin_ia32_permvarsf512_mask" | "__builtin_ia32_permvarqi512_mask"
-                | "__builtin_ia32_permvarqi256_mask" | "__builtin_ia32_permvarqi128_mask"
-                | "__builtin_ia32_vpmultishiftqb512_mask" | "__builtin_ia32_vpmultishiftqb256_mask"
-                | "__builtin_ia32_vpmultishiftqb128_mask"
-                => {
+            "__builtin_ia32_prold512_mask"
+            | "__builtin_ia32_pmuldq512_mask"
+            | "__builtin_ia32_pmuludq512_mask"
+            | "__builtin_ia32_pmaxsd512_mask"
+            | "__builtin_ia32_pmaxsq512_mask"
+            | "__builtin_ia32_pmaxsq256_mask"
+            | "__builtin_ia32_pmaxsq128_mask"
+            | "__builtin_ia32_pmaxud512_mask"
+            | "__builtin_ia32_pmaxuq512_mask"
+            | "__builtin_ia32_pminsd512_mask"
+            | "__builtin_ia32_pminsq512_mask"
+            | "__builtin_ia32_pminsq256_mask"
+            | "__builtin_ia32_pminsq128_mask"
+            | "__builtin_ia32_pminud512_mask"
+            | "__builtin_ia32_pminuq512_mask"
+            | "__builtin_ia32_prolq512_mask"
+            | "__builtin_ia32_prorq512_mask"
+            | "__builtin_ia32_pslldi512_mask"
+            | "__builtin_ia32_psrldi512_mask"
+            | "__builtin_ia32_psllqi512_mask"
+            | "__builtin_ia32_psrlqi512_mask"
+            | "__builtin_ia32_pslld512_mask"
+            | "__builtin_ia32_psrld512_mask"
+            | "__builtin_ia32_psllq512_mask"
+            | "__builtin_ia32_psrlq512_mask"
+            | "__builtin_ia32_psrad512_mask"
+            | "__builtin_ia32_psraq512_mask"
+            | "__builtin_ia32_psradi512_mask"
+            | "__builtin_ia32_psraqi512_mask"
+            | "__builtin_ia32_psrav16si_mask"
+            | "__builtin_ia32_psrav8di_mask"
+            | "__builtin_ia32_prolvd512_mask"
+            | "__builtin_ia32_prorvd512_mask"
+            | "__builtin_ia32_prolvq512_mask"
+            | "__builtin_ia32_prorvq512_mask"
+            | "__builtin_ia32_psllv16si_mask"
+            | "__builtin_ia32_psrlv16si_mask"
+            | "__builtin_ia32_psllv8di_mask"
+            | "__builtin_ia32_psrlv8di_mask"
+            | "__builtin_ia32_permvarsi512_mask"
+            | "__builtin_ia32_vpermilvarps512_mask"
+            | "__builtin_ia32_vpermilvarpd512_mask"
+            | "__builtin_ia32_permvardi512_mask"
+            | "__builtin_ia32_permvarsf512_mask"
+            | "__builtin_ia32_permvarqi512_mask"
+            | "__builtin_ia32_permvarqi256_mask"
+            | "__builtin_ia32_permvarqi128_mask"
+            | "__builtin_ia32_vpmultishiftqb512_mask"
+            | "__builtin_ia32_vpmultishiftqb256_mask"
+            | "__builtin_ia32_vpmultishiftqb128_mask" => {
                 let mut new_args = args.to_vec();
                 let arg3_type = gcc_func.get_param_type(2);
-                let first_arg = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue();
+                let first_arg = builder
+                    .current_func()
+                    .new_local(None, arg3_type, "undefined_for_intrinsic")
+                    .to_rvalue();
                 new_args.push(first_arg);
                 let arg4_type = gcc_func.get_param_type(3);
                 let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
                 new_args.push(minus_one);
                 args = new_args.into();
-            },
-            "__builtin_ia32_pmaxuq256_mask" | "__builtin_ia32_pmaxuq128_mask" | "__builtin_ia32_pminuq256_mask"
-                | "__builtin_ia32_pminuq128_mask" | "__builtin_ia32_prold256_mask" | "__builtin_ia32_prold128_mask"
-                | "__builtin_ia32_prord512_mask" | "__builtin_ia32_prord256_mask" | "__builtin_ia32_prord128_mask"
-                | "__builtin_ia32_prolq256_mask" | "__builtin_ia32_prolq128_mask" | "__builtin_ia32_prorq256_mask"
-                | "__builtin_ia32_prorq128_mask" | "__builtin_ia32_psraq256_mask" | "__builtin_ia32_psraq128_mask"
-                | "__builtin_ia32_psraqi256_mask" | "__builtin_ia32_psraqi128_mask" | "__builtin_ia32_psravq256_mask"
-                | "__builtin_ia32_psravq128_mask" | "__builtin_ia32_prolvd256_mask" | "__builtin_ia32_prolvd128_mask"
-                | "__builtin_ia32_prorvd256_mask" | "__builtin_ia32_prorvd128_mask" | "__builtin_ia32_prolvq256_mask"
-                | "__builtin_ia32_prolvq128_mask" | "__builtin_ia32_prorvq256_mask" | "__builtin_ia32_prorvq128_mask"
-                | "__builtin_ia32_permvardi256_mask" | "__builtin_ia32_permvardf512_mask" | "__builtin_ia32_permvardf256_mask"
-                | "__builtin_ia32_pmulhuw512_mask" | "__builtin_ia32_pmulhw512_mask" | "__builtin_ia32_pmulhrsw512_mask"
-                | "__builtin_ia32_pmaxuw512_mask" | "__builtin_ia32_pmaxub512_mask" | "__builtin_ia32_pmaxsw512_mask"
-                | "__builtin_ia32_pmaxsb512_mask" | "__builtin_ia32_pminuw512_mask" | "__builtin_ia32_pminub512_mask"
-                | "__builtin_ia32_pminsw512_mask" | "__builtin_ia32_pminsb512_mask"
-                | "__builtin_ia32_pmaddwd512_mask" | "__builtin_ia32_pmaddubsw512_mask" | "__builtin_ia32_packssdw512_mask"
-                | "__builtin_ia32_packsswb512_mask" | "__builtin_ia32_packusdw512_mask" | "__builtin_ia32_packuswb512_mask"
-                | "__builtin_ia32_pavgw512_mask" | "__builtin_ia32_pavgb512_mask" | "__builtin_ia32_psllw512_mask"
-                | "__builtin_ia32_psllwi512_mask" | "__builtin_ia32_psllv32hi_mask" | "__builtin_ia32_psrlw512_mask"
-                | "__builtin_ia32_psrlwi512_mask" | "__builtin_ia32_psllv16hi_mask" | "__builtin_ia32_psllv8hi_mask"
-                | "__builtin_ia32_psrlv32hi_mask" | "__builtin_ia32_psraw512_mask" | "__builtin_ia32_psrawi512_mask"
-                | "__builtin_ia32_psrlv16hi_mask" | "__builtin_ia32_psrlv8hi_mask" | "__builtin_ia32_psrav32hi_mask"
-                | "__builtin_ia32_permvarhi512_mask" | "__builtin_ia32_pshufb512_mask" | "__builtin_ia32_psrav16hi_mask"
-                | "__builtin_ia32_psrav8hi_mask" | "__builtin_ia32_permvarhi256_mask" | "__builtin_ia32_permvarhi128_mask"
-                => {
+            }
+            "__builtin_ia32_pmaxuq256_mask"
+            | "__builtin_ia32_pmaxuq128_mask"
+            | "__builtin_ia32_pminuq256_mask"
+            | "__builtin_ia32_pminuq128_mask"
+            | "__builtin_ia32_prold256_mask"
+            | "__builtin_ia32_prold128_mask"
+            | "__builtin_ia32_prord512_mask"
+            | "__builtin_ia32_prord256_mask"
+            | "__builtin_ia32_prord128_mask"
+            | "__builtin_ia32_prolq256_mask"
+            | "__builtin_ia32_prolq128_mask"
+            | "__builtin_ia32_prorq256_mask"
+            | "__builtin_ia32_prorq128_mask"
+            | "__builtin_ia32_psraq256_mask"
+            | "__builtin_ia32_psraq128_mask"
+            | "__builtin_ia32_psraqi256_mask"
+            | "__builtin_ia32_psraqi128_mask"
+            | "__builtin_ia32_psravq256_mask"
+            | "__builtin_ia32_psravq128_mask"
+            | "__builtin_ia32_prolvd256_mask"
+            | "__builtin_ia32_prolvd128_mask"
+            | "__builtin_ia32_prorvd256_mask"
+            | "__builtin_ia32_prorvd128_mask"
+            | "__builtin_ia32_prolvq256_mask"
+            | "__builtin_ia32_prolvq128_mask"
+            | "__builtin_ia32_prorvq256_mask"
+            | "__builtin_ia32_prorvq128_mask"
+            | "__builtin_ia32_permvardi256_mask"
+            | "__builtin_ia32_permvardf512_mask"
+            | "__builtin_ia32_permvardf256_mask"
+            | "__builtin_ia32_pmulhuw512_mask"
+            | "__builtin_ia32_pmulhw512_mask"
+            | "__builtin_ia32_pmulhrsw512_mask"
+            | "__builtin_ia32_pmaxuw512_mask"
+            | "__builtin_ia32_pmaxub512_mask"
+            | "__builtin_ia32_pmaxsw512_mask"
+            | "__builtin_ia32_pmaxsb512_mask"
+            | "__builtin_ia32_pminuw512_mask"
+            | "__builtin_ia32_pminub512_mask"
+            | "__builtin_ia32_pminsw512_mask"
+            | "__builtin_ia32_pminsb512_mask"
+            | "__builtin_ia32_pmaddwd512_mask"
+            | "__builtin_ia32_pmaddubsw512_mask"
+            | "__builtin_ia32_packssdw512_mask"
+            | "__builtin_ia32_packsswb512_mask"
+            | "__builtin_ia32_packusdw512_mask"
+            | "__builtin_ia32_packuswb512_mask"
+            | "__builtin_ia32_pavgw512_mask"
+            | "__builtin_ia32_pavgb512_mask"
+            | "__builtin_ia32_psllw512_mask"
+            | "__builtin_ia32_psllwi512_mask"
+            | "__builtin_ia32_psllv32hi_mask"
+            | "__builtin_ia32_psrlw512_mask"
+            | "__builtin_ia32_psrlwi512_mask"
+            | "__builtin_ia32_psllv16hi_mask"
+            | "__builtin_ia32_psllv8hi_mask"
+            | "__builtin_ia32_psrlv32hi_mask"
+            | "__builtin_ia32_psraw512_mask"
+            | "__builtin_ia32_psrawi512_mask"
+            | "__builtin_ia32_psrlv16hi_mask"
+            | "__builtin_ia32_psrlv8hi_mask"
+            | "__builtin_ia32_psrav32hi_mask"
+            | "__builtin_ia32_permvarhi512_mask"
+            | "__builtin_ia32_pshufb512_mask"
+            | "__builtin_ia32_psrav16hi_mask"
+            | "__builtin_ia32_psrav8hi_mask"
+            | "__builtin_ia32_permvarhi256_mask"
+            | "__builtin_ia32_permvarhi128_mask" => {
                 let mut new_args = args.to_vec();
                 let arg3_type = gcc_func.get_param_type(2);
                 let vector_type = arg3_type.dyncast_vector().expect("vector type");
                 let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
                 let num_units = vector_type.get_num_units();
-                let first_arg = builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]);
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg3_type, &vec![zero; num_units]);
                 new_args.push(first_arg);
                 let arg4_type = gcc_func.get_param_type(3);
                 let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
                 new_args.push(minus_one);
                 args = new_args.into();
-            },
-            "__builtin_ia32_dbpsadbw512_mask" | "__builtin_ia32_dbpsadbw256_mask" | "__builtin_ia32_dbpsadbw128_mask" => {
+            }
+            "__builtin_ia32_dbpsadbw512_mask"
+            | "__builtin_ia32_dbpsadbw256_mask"
+            | "__builtin_ia32_dbpsadbw128_mask" => {
                 let mut new_args = args.to_vec();
                 let arg4_type = gcc_func.get_param_type(3);
                 let vector_type = arg4_type.dyncast_vector().expect("vector type");
                 let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
                 let num_units = vector_type.get_num_units();
-                let first_arg = builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]);
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg4_type, &vec![zero; num_units]);
                 new_args.push(first_arg);
                 let arg5_type = gcc_func.get_param_type(4);
                 let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
                 new_args.push(minus_one);
                 args = new_args.into();
-            },
-            "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask"
-                | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => {
+            }
+            "__builtin_ia32_vplzcntd_512_mask"
+            | "__builtin_ia32_vplzcntd_256_mask"
+            | "__builtin_ia32_vplzcntd_128_mask"
+            | "__builtin_ia32_vplzcntq_512_mask"
+            | "__builtin_ia32_vplzcntq_256_mask"
+            | "__builtin_ia32_vplzcntq_128_mask" => {
                 let mut new_args = args.to_vec();
                 // Remove last arg as it doesn't seem to be used in GCC and is always false.
                 new_args.pop();
@@ -98,37 +189,45 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 let vector_type = arg2_type.dyncast_vector().expect("vector type");
                 let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
                 let num_units = vector_type.get_num_units();
-                let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
                 new_args.push(first_arg);
                 let arg3_type = gcc_func.get_param_type(2);
                 let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
                 new_args.push(minus_one);
                 args = new_args.into();
-            },
-            "__builtin_ia32_vpconflictsi_512_mask" | "__builtin_ia32_vpconflictsi_256_mask"
-                | "__builtin_ia32_vpconflictsi_128_mask" | "__builtin_ia32_vpconflictdi_512_mask"
-                | "__builtin_ia32_vpconflictdi_256_mask" | "__builtin_ia32_vpconflictdi_128_mask" => {
+            }
+            "__builtin_ia32_vpconflictsi_512_mask"
+            | "__builtin_ia32_vpconflictsi_256_mask"
+            | "__builtin_ia32_vpconflictsi_128_mask"
+            | "__builtin_ia32_vpconflictdi_512_mask"
+            | "__builtin_ia32_vpconflictdi_256_mask"
+            | "__builtin_ia32_vpconflictdi_128_mask" => {
                 let mut new_args = args.to_vec();
                 let arg2_type = gcc_func.get_param_type(1);
                 let vector_type = arg2_type.dyncast_vector().expect("vector type");
                 let zero = builder.context.new_rvalue_zero(vector_type.get_element_type());
                 let num_units = vector_type.get_num_units();
-                let first_arg = builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
+                let first_arg =
+                    builder.context.new_rvalue_from_vector(None, arg2_type, &vec![zero; num_units]);
                 new_args.push(first_arg);
                 let arg3_type = gcc_func.get_param_type(2);
                 let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
                 new_args.push(minus_one);
                 args = new_args.into();
-            },
-            "__builtin_ia32_pternlogd512_mask" | "__builtin_ia32_pternlogd256_mask"
-                | "__builtin_ia32_pternlogd128_mask" | "__builtin_ia32_pternlogq512_mask"
-                | "__builtin_ia32_pternlogq256_mask" | "__builtin_ia32_pternlogq128_mask" => {
+            }
+            "__builtin_ia32_pternlogd512_mask"
+            | "__builtin_ia32_pternlogd256_mask"
+            | "__builtin_ia32_pternlogd128_mask"
+            | "__builtin_ia32_pternlogq512_mask"
+            | "__builtin_ia32_pternlogq256_mask"
+            | "__builtin_ia32_pternlogq128_mask" => {
                 let mut new_args = args.to_vec();
                 let arg5_type = gcc_func.get_param_type(4);
                 let minus_one = builder.context.new_rvalue_from_int(arg5_type, -1);
                 new_args.push(minus_one);
                 args = new_args.into();
-            },
+            }
             "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
                 let mut new_args = args.to_vec();
 
@@ -154,24 +253,33 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 }
 
                 args = new_args.into();
-            },
-            "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
-                | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
-                | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
-                | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask"
-                | "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
-                |  "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" => {
+            }
+            "__builtin_ia32_addps512_mask"
+            | "__builtin_ia32_addpd512_mask"
+            | "__builtin_ia32_subps512_mask"
+            | "__builtin_ia32_subpd512_mask"
+            | "__builtin_ia32_mulps512_mask"
+            | "__builtin_ia32_mulpd512_mask"
+            | "__builtin_ia32_divps512_mask"
+            | "__builtin_ia32_divpd512_mask"
+            | "__builtin_ia32_maxps512_mask"
+            | "__builtin_ia32_maxpd512_mask"
+            | "__builtin_ia32_minps512_mask"
+            | "__builtin_ia32_minpd512_mask" => {
                 let mut new_args = args.to_vec();
                 let last_arg = new_args.pop().expect("last arg");
                 let arg3_type = gcc_func.get_param_type(2);
-                let undefined = builder.current_func().new_local(None, arg3_type, "undefined_for_intrinsic").to_rvalue();
+                let undefined = builder
+                    .current_func()
+                    .new_local(None, arg3_type, "undefined_for_intrinsic")
+                    .to_rvalue();
                 new_args.push(undefined);
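+                // Since the mask pushed below is all ones, no lanes are masked off and this
+                // uninitialized "source" value is never actually read.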
                 let arg4_type = gcc_func.get_param_type(3);
                 let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
                 new_args.push(minus_one);
                 new_args.push(last_arg);
                 args = new_args.into();
-            },
+            }
             "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask" => {
                 let mut new_args = args.to_vec();
                 let last_arg = new_args.pop().expect("last arg");
@@ -180,54 +288,72 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 new_args.push(minus_one);
                 new_args.push(last_arg);
                 args = new_args.into();
-            },
-            "__builtin_ia32_vpermi2vard512_mask" | "__builtin_ia32_vpermi2vard256_mask"
-                | "__builtin_ia32_vpermi2vard128_mask" | "__builtin_ia32_vpermi2varq512_mask"
-                | "__builtin_ia32_vpermi2varq256_mask" | "__builtin_ia32_vpermi2varq128_mask"
-                | "__builtin_ia32_vpermi2varps512_mask" | "__builtin_ia32_vpermi2varps256_mask"
-                | "__builtin_ia32_vpermi2varps128_mask" | "__builtin_ia32_vpermi2varpd512_mask"
-                | "__builtin_ia32_vpermi2varpd256_mask" | "__builtin_ia32_vpermi2varpd128_mask" | "__builtin_ia32_vpmadd52huq512_mask"
-                | "__builtin_ia32_vpmadd52luq512_mask" | "__builtin_ia32_vpmadd52huq256_mask" | "__builtin_ia32_vpmadd52luq256_mask"
-                | "__builtin_ia32_vpmadd52huq128_mask"
-                => {
+            }
+            "__builtin_ia32_vpermi2vard512_mask"
+            | "__builtin_ia32_vpermi2vard256_mask"
+            | "__builtin_ia32_vpermi2vard128_mask"
+            | "__builtin_ia32_vpermi2varq512_mask"
+            | "__builtin_ia32_vpermi2varq256_mask"
+            | "__builtin_ia32_vpermi2varq128_mask"
+            | "__builtin_ia32_vpermi2varps512_mask"
+            | "__builtin_ia32_vpermi2varps256_mask"
+            | "__builtin_ia32_vpermi2varps128_mask"
+            | "__builtin_ia32_vpermi2varpd512_mask"
+            | "__builtin_ia32_vpermi2varpd256_mask"
+            | "__builtin_ia32_vpermi2varpd128_mask"
+            | "__builtin_ia32_vpmadd52huq512_mask"
+            | "__builtin_ia32_vpmadd52luq512_mask"
+            | "__builtin_ia32_vpmadd52huq256_mask"
+            | "__builtin_ia32_vpmadd52luq256_mask"
+            | "__builtin_ia32_vpmadd52huq128_mask" => {
                 let mut new_args = args.to_vec();
                 let arg4_type = gcc_func.get_param_type(3);
                 let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
                 new_args.push(minus_one);
                 args = new_args.into();
-            },
-            "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask"
-                | "__builtin_ia32_sqrtps512_mask" | "__builtin_ia32_sqrtpd512_mask" => {
+            }
+            "__builtin_ia32_cvtdq2ps512_mask"
+            | "__builtin_ia32_cvtudq2ps512_mask"
+            | "__builtin_ia32_sqrtps512_mask"
+            | "__builtin_ia32_sqrtpd512_mask" => {
                 let mut new_args = args.to_vec();
                 let last_arg = new_args.pop().expect("last arg");
                 let arg2_type = gcc_func.get_param_type(1);
-                let undefined = builder.current_func().new_local(None, arg2_type, "undefined_for_intrinsic").to_rvalue();
+                let undefined = builder
+                    .current_func()
+                    .new_local(None, arg2_type, "undefined_for_intrinsic")
+                    .to_rvalue();
                 new_args.push(undefined);
                 let arg3_type = gcc_func.get_param_type(2);
                 let minus_one = builder.context.new_rvalue_from_int(arg3_type, -1);
                 new_args.push(minus_one);
                 new_args.push(last_arg);
                 args = new_args.into();
-            },
+            }
             "__builtin_ia32_stmxcsr" => {
                 args = vec![].into();
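+                // __builtin_ia32_stmxcsr takes no arguments and returns the value directly;
+                // the pointer argument of llvm.x86.sse.stmxcsr is handled in
+                // adjust_intrinsic_return_value below.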
-            },
-            "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => {
+            }
+            "__builtin_ia32_addcarryx_u64"
+            | "__builtin_ia32_sbb_u64"
+            | "__builtin_ia32_addcarryx_u32"
+            | "__builtin_ia32_sbb_u32" => {
                 let mut new_args = args.to_vec();
                 let arg2_type = gcc_func.get_param_type(1);
                 let variable = builder.current_func().new_local(None, arg2_type, "addcarryResult");
                 new_args.push(variable.get_address(None));
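+                // The GCC builtin returns only the carry flag and writes the sum through this
+                // extra pointer out-parameter; adjust_intrinsic_return_value repacks both into
+                // the pair that the LLVM intrinsic returns.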
                 args = new_args.into();
-            },
-            "__builtin_ia32_vpermt2varqi512_mask" | "__builtin_ia32_vpermt2varqi256_mask"
-                | "__builtin_ia32_vpermt2varqi128_mask" | "__builtin_ia32_vpermt2varhi512_mask"
-                | "__builtin_ia32_vpermt2varhi256_mask" | "__builtin_ia32_vpermt2varhi128_mask"
-                => {
+            }
+            "__builtin_ia32_vpermt2varqi512_mask"
+            | "__builtin_ia32_vpermt2varqi256_mask"
+            | "__builtin_ia32_vpermt2varqi128_mask"
+            | "__builtin_ia32_vpermt2varhi512_mask"
+            | "__builtin_ia32_vpermt2varhi256_mask"
+            | "__builtin_ia32_vpermt2varhi128_mask" => {
                 let new_args = args.to_vec();
                 let arg4_type = gcc_func.get_param_type(3);
                 let minus_one = builder.context.new_rvalue_from_int(arg4_type, -1);
                 args = vec![new_args[1], new_args[0], new_args[2], minus_one].into();
-            },
+            }
             "__builtin_ia32_xrstor" | "__builtin_ia32_xsavec" => {
                 let new_args = args.to_vec();
                 let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32);
@@ -235,22 +361,25 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 let arg2_type = gcc_func.get_param_type(1);
                 let arg2 = builder.context.new_cast(None, arg2, arg2_type);
                 args = vec![new_args[0], arg2].into();
-            },
+            }
             // These builtins are passed one more argument than needed.
             "__builtin_prefetch" => {
                 let mut new_args = args.to_vec();
                 new_args.pop();
                 args = new_args.into();
-            },
+            }
             // The GCC version returns one value of the tuple through a pointer.
             "__builtin_ia32_rdrand64_step" => {
-                let arg = builder.current_func().new_local(None, builder.ulonglong_type, "return_rdrand_arg");
+                let arg = builder.current_func().new_local(
+                    None,
+                    builder.ulonglong_type,
+                    "return_rdrand_arg",
+                );
                 args = vec![arg.get_address(None)].into();
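+                // adjust_intrinsic_return_value reads the random number back through this
+                // pointer and pairs it with the success flag returned by the builtin.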
-            },
+            }
             _ => (),
         }
-    }
-    else {
+    } else {
         match &*func_name {
             "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => {
                 let new_args = args.to_vec();
@@ -259,10 +388,10 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 let arg4_type = gcc_func.get_param_type(3);
                 let arg4 = builder.context.new_bitcast(None, new_args[2], arg4_type);
                 args = vec![new_args[0], new_args[1], arg3, arg4, new_args[3], new_args[5]].into();
-            },
+            }
             // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors.
             // FIXME: the intrinsics like _mm_mask_fmadd_sd should probably directly call the GCC
-            // instrinsic to avoid this.
+            // intrinsic to avoid this.
             "__builtin_ia32_vfmaddss3_round" => {
                 let new_args = args.to_vec();
                 let arg1_type = gcc_func.get_param_type(0);
@@ -272,7 +401,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 4]);
                 let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 4]);
                 args = vec![a, b, c, new_args[3]].into();
-            },
+            }
             "__builtin_ia32_vfmaddsd3_round" => {
                 let new_args = args.to_vec();
                 let arg1_type = gcc_func.get_param_type(0);
@@ -282,25 +411,34 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 let b = builder.context.new_rvalue_from_vector(None, arg2_type, &[new_args[1]; 2]);
                 let c = builder.context.new_rvalue_from_vector(None, arg3_type, &[new_args[2]; 2]);
                 args = vec![a, b, c, new_args[3]].into();
-            },
-            "__builtin_ia32_vfmaddsubpd256" | "__builtin_ia32_vfmaddsubps" | "__builtin_ia32_vfmaddsubps256"
-                | "__builtin_ia32_vfmaddsubpd" => {
+            }
+            "__builtin_ia32_vfmaddsubpd256"
+            | "__builtin_ia32_vfmaddsubps"
+            | "__builtin_ia32_vfmaddsubps256"
+            | "__builtin_ia32_vfmaddsubpd" => {
                 if let Some(original_function_name) = original_function_name {
                     match &**original_function_name {
-                        "llvm.x86.fma.vfmsubadd.pd.256" | "llvm.x86.fma.vfmsubadd.ps" | "llvm.x86.fma.vfmsubadd.ps.256"
-                            | "llvm.x86.fma.vfmsubadd.pd" => {
+                        "llvm.x86.fma.vfmsubadd.pd.256"
+                        | "llvm.x86.fma.vfmsubadd.ps"
+                        | "llvm.x86.fma.vfmsubadd.ps.256"
+                        | "llvm.x86.fma.vfmsubadd.pd" => {
                             // NOTE: since both llvm.x86.fma.vfmsubadd.ps and llvm.x86.fma.vfmaddsub.ps map to
                             // __builtin_ia32_vfmaddsubps, only negate the third argument if this
                             // comes from a subadd LLVM intrinsic, e.g. _mm256_fmsubadd_pd.
                             let mut new_args = args.to_vec();
                             let arg3 = &mut new_args[2];
-                            *arg3 = builder.context.new_unary_op(None, UnaryOp::Minus, arg3.get_type(), *arg3);
+                            *arg3 = builder.context.new_unary_op(
+                                None,
+                                UnaryOp::Minus,
+                                arg3.get_type(),
+                                *arg3,
+                            );
                             args = new_args.into();
-                        },
+                        }
                         _ => (),
                     }
                 }
-            },
+            }
             "__builtin_ia32_ldmxcsr" => {
                 // The builtin __builtin_ia32_ldmxcsr takes an integer value while llvm.x86.sse.ldmxcsr takes a pointer,
                 // so dereference the pointer.
@@ -309,23 +447,31 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
                 let arg1 = builder.context.new_cast(None, args[0], uint_ptr_type);
                 new_args[0] = arg1.dereference(None).to_rvalue();
                 args = new_args.into();
-            },
-            "__builtin_ia32_rcp14sd_mask" | "__builtin_ia32_rcp14ss_mask" | "__builtin_ia32_rsqrt14sd_mask"
-                | "__builtin_ia32_rsqrt14ss_mask" => {
+            }
+            "__builtin_ia32_rcp14sd_mask"
+            | "__builtin_ia32_rcp14ss_mask"
+            | "__builtin_ia32_rsqrt14sd_mask"
+            | "__builtin_ia32_rsqrt14ss_mask" => {
                 let new_args = args.to_vec();
                 args = vec![new_args[1], new_args[0], new_args[2], new_args[3]].into();
-            },
+            }
             "__builtin_ia32_sqrtsd_mask_round" | "__builtin_ia32_sqrtss_mask_round" => {
                 let new_args = args.to_vec();
                 args = vec![new_args[1], new_args[0], new_args[2], new_args[3], new_args[4]].into();
-            },
-            "__builtin_ia32_vpshrdv_v8di" | "__builtin_ia32_vpshrdv_v4di" | "__builtin_ia32_vpshrdv_v2di" |
-                "__builtin_ia32_vpshrdv_v16si" | "__builtin_ia32_vpshrdv_v8si" | "__builtin_ia32_vpshrdv_v4si" |
-                "__builtin_ia32_vpshrdv_v32hi" | "__builtin_ia32_vpshrdv_v16hi" | "__builtin_ia32_vpshrdv_v8hi" => {
+            }
+            "__builtin_ia32_vpshrdv_v8di"
+            | "__builtin_ia32_vpshrdv_v4di"
+            | "__builtin_ia32_vpshrdv_v2di"
+            | "__builtin_ia32_vpshrdv_v16si"
+            | "__builtin_ia32_vpshrdv_v8si"
+            | "__builtin_ia32_vpshrdv_v4si"
+            | "__builtin_ia32_vpshrdv_v32hi"
+            | "__builtin_ia32_vpshrdv_v16hi"
+            | "__builtin_ia32_vpshrdv_v8hi" => {
                 // The first two arguments are reversed, compared to LLVM.
                 let new_args = args.to_vec();
                 args = vec![new_args[1], new_args[0], new_args[2]].into();
-            },
+            }
             _ => (),
         }
     }
@@ -333,16 +479,27 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc
     args
 }
 
-pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc, 'tcx>, mut return_value: RValue<'gcc>, func_name: &str, args: &[RValue<'gcc>], args_adjusted: bool, orig_args: &[RValue<'gcc>]) -> RValue<'gcc> {
+pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(
+    builder: &Builder<'a, 'gcc, 'tcx>,
+    mut return_value: RValue<'gcc>,
+    func_name: &str,
+    args: &[RValue<'gcc>],
+    args_adjusted: bool,
+    orig_args: &[RValue<'gcc>],
+) -> RValue<'gcc> {
     match func_name {
         "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => {
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             {
                 let zero = builder.context.new_rvalue_zero(builder.int_type);
-                return_value = builder.context.new_vector_access(None, return_value, zero).to_rvalue();
+                return_value =
+                    builder.context.new_vector_access(None, return_value, zero).to_rvalue();
             }
-        },
-        "__builtin_ia32_addcarryx_u64" | "__builtin_ia32_sbb_u64" | "__builtin_ia32_addcarryx_u32" | "__builtin_ia32_sbb_u32" => {
+        }
+        "__builtin_ia32_addcarryx_u64"
+        | "__builtin_ia32_sbb_u64"
+        | "__builtin_ia32_addcarryx_u32"
+        | "__builtin_ia32_sbb_u32" => {
             // Both llvm.x86.addcarry.32 and llvm.x86.addcarryx.u32 point to the same GCC builtin,
             // but only the former requires adjusting the return value.
             // Those two LLVM intrinsics differ in their argument count, which is why we check if the
@@ -351,10 +508,16 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc,
                 let last_arg = args.last().expect("last arg");
                 let field1 = builder.context.new_field(None, builder.u8_type, "carryFlag");
                 let field2 = builder.context.new_field(None, args[1].get_type(), "carryResult");
-                let struct_type = builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]);
-                return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[return_value, last_arg.dereference(None).to_rvalue()]);
+                let struct_type =
+                    builder.context.new_struct_type(None, "addcarryResult", &[field1, field2]);
+                return_value = builder.context.new_struct_constructor(
+                    None,
+                    struct_type.as_type(),
+                    None,
+                    &[return_value, last_arg.dereference(None).to_rvalue()],
+                );
             }
-        },
+        }
         "__builtin_ia32_stmxcsr" => {
             // The builtin __builtin_ia32_stmxcsr returns a value while llvm.x86.sse.stmxcsr writes
             // the result in its pointer argument.
@@ -366,20 +529,24 @@ pub fn adjust_intrinsic_return_value<'a, 'gcc, 'tcx>(builder: &Builder<'a, 'gcc,
             // The return value was assigned to the result pointer above. In order to not call the
             // builtin twice, we overwrite the return value with a dummy value.
             return_value = builder.context.new_rvalue_zero(builder.int_type);
-        },
+        }
         "__builtin_ia32_rdrand64_step" => {
             let random_number = args[0].dereference(None).to_rvalue();
-            let success_variable = builder.current_func().new_local(None, return_value.get_type(), "success");
+            let success_variable =
+                builder.current_func().new_local(None, return_value.get_type(), "success");
             builder.llbb().add_assignment(None, success_variable, return_value);
 
             let field1 = builder.context.new_field(None, random_number.get_type(), "random_number");
             let field2 = builder.context.new_field(None, return_value.get_type(), "success");
-            let struct_type = builder.context.new_struct_type(None, "rdrand_result", &[field1, field2]);
-            return_value = builder.context.new_struct_constructor(None, struct_type.as_type(), None, &[
-                random_number,
-                success_variable.to_rvalue(),
-            ]);
-        },
+            let struct_type =
+                builder.context.new_struct_type(None, "rdrand_result", &[field1, field2]);
+            return_value = builder.context.new_struct_constructor(
+                None,
+                struct_type.as_type(),
+                None,
+                &[random_number, success_variable.to_rvalue()],
+            );
+        }
         _ => (),
     }
 
@@ -391,23 +558,33 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
     match func_name {
         // NOTE: these intrinsics have missing parameters before the last one, so ignore the
         // last argument type check.
-        "__builtin_ia32_maxps512_mask" | "__builtin_ia32_maxpd512_mask"
-            | "__builtin_ia32_minps512_mask" | "__builtin_ia32_minpd512_mask" | "__builtin_ia32_sqrtps512_mask"
-            | "__builtin_ia32_sqrtpd512_mask" | "__builtin_ia32_addps512_mask" | "__builtin_ia32_addpd512_mask"
-            | "__builtin_ia32_subps512_mask" | "__builtin_ia32_subpd512_mask"
-            | "__builtin_ia32_mulps512_mask" | "__builtin_ia32_mulpd512_mask"
-            | "__builtin_ia32_divps512_mask" | "__builtin_ia32_divpd512_mask"
-            | "__builtin_ia32_vfmaddsubps512_mask" | "__builtin_ia32_vfmaddsubpd512_mask"
-            | "__builtin_ia32_cvtdq2ps512_mask" | "__builtin_ia32_cvtudq2ps512_mask" => {
-                if index == args_len - 1 {
-                    return true;
-                }
-            },
+        "__builtin_ia32_maxps512_mask"
+        | "__builtin_ia32_maxpd512_mask"
+        | "__builtin_ia32_minps512_mask"
+        | "__builtin_ia32_minpd512_mask"
+        | "__builtin_ia32_sqrtps512_mask"
+        | "__builtin_ia32_sqrtpd512_mask"
+        | "__builtin_ia32_addps512_mask"
+        | "__builtin_ia32_addpd512_mask"
+        | "__builtin_ia32_subps512_mask"
+        | "__builtin_ia32_subpd512_mask"
+        | "__builtin_ia32_mulps512_mask"
+        | "__builtin_ia32_mulpd512_mask"
+        | "__builtin_ia32_divps512_mask"
+        | "__builtin_ia32_divpd512_mask"
+        | "__builtin_ia32_vfmaddsubps512_mask"
+        | "__builtin_ia32_vfmaddsubpd512_mask"
+        | "__builtin_ia32_cvtdq2ps512_mask"
+        | "__builtin_ia32_cvtudq2ps512_mask" => {
+            if index == args_len - 1 {
+                return true;
+            }
+        }
         "__builtin_ia32_rndscaless_mask_round" | "__builtin_ia32_rndscalesd_mask_round" => {
             if index == 2 || index == 3 {
                 return true;
             }
-        },
+        }
         "__builtin_ia32_vfmaddps512_mask" | "__builtin_ia32_vfmaddpd512_mask" => {
             // Since there are two LLVM intrinsics that map to each of these GCC builtins and only
             // one of them has a missing parameter before the last one, we check the number of
@@ -415,49 +592,50 @@ pub fn ignore_arg_cast(func_name: &str, index: usize, args_len: usize) -> bool {
             if args_len == 4 && index == args_len - 1 {
                 return true;
             }
-        },
+        }
         // NOTE: the LLVM intrinsic receives 3 floats, but the GCC builtin requires 3 vectors.
         "__builtin_ia32_vfmaddss3_round" | "__builtin_ia32_vfmaddsd3_round" => return true,
-        "__builtin_ia32_vplzcntd_512_mask" | "__builtin_ia32_vplzcntd_256_mask" | "__builtin_ia32_vplzcntd_128_mask"
-            | "__builtin_ia32_vplzcntq_512_mask" | "__builtin_ia32_vplzcntq_256_mask" | "__builtin_ia32_vplzcntq_128_mask" => {
+        "__builtin_ia32_vplzcntd_512_mask"
+        | "__builtin_ia32_vplzcntd_256_mask"
+        | "__builtin_ia32_vplzcntd_128_mask"
+        | "__builtin_ia32_vplzcntq_512_mask"
+        | "__builtin_ia32_vplzcntq_256_mask"
+        | "__builtin_ia32_vplzcntq_128_mask" => {
             if index == args_len - 1 {
                 return true;
             }
-        },
+        }
         _ => (),
     }
 
     false
 }
 
-#[cfg(not(feature="master"))]
+#[cfg(not(feature = "master"))]
 pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
-    let gcc_name =
-        match name {
-            "llvm.x86.sse2.pause" => {
-                // NOTE: pause is only a hint, so we use a dummy built-in because target built-ins
-                // are not supported in libgccjit 12.
-                "__builtin_inff"
-            },
-            "llvm.x86.xgetbv" => {
-                "__builtin_trap"
-            },
-            _ => unimplemented!("unsupported LLVM intrinsic {}", name),
-        };
+    let gcc_name = match name {
+        "llvm.x86.sse2.pause" => {
+            // NOTE: pause is only a hint, so we use a dummy built-in because target built-ins
+            // are not supported in libgccjit 12.
+            "__builtin_inff"
+        }
+        "llvm.x86.xgetbv" => "__builtin_trap",
+        _ => unimplemented!("unsupported LLVM intrinsic {}", name),
+    };
     let func = cx.context.get_builtin_function(gcc_name);
     cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
     return func;
 }
 
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 pub fn intrinsic<'gcc, 'tcx>(name: &str, cx: &CodegenCx<'gcc, 'tcx>) -> Function<'gcc> {
     match name {
         "llvm.prefetch" => {
             let gcc_name = "__builtin_prefetch";
             let func = cx.context.get_builtin_function(gcc_name);
             cx.functions.borrow_mut().insert(gcc_name.to_string(), func);
-            return func
-        },
+            return func;
+        }
         _ => (),
     }
 
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index d43f5d74757..a6c8b72e851 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -1,43 +1,48 @@
 pub mod llvm;
 mod simd;
 
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use std::iter;
 
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::FunctionType;
 use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
-use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::common::IntPredicate;
+use rustc_codegen_ssa::errors::InvalidMonomorphization;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
-use rustc_codegen_ssa::traits::{ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
-#[cfg(feature="master")]
+use rustc_codegen_ssa::traits::{
+    ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods,
+};
+#[cfg(feature = "master")]
 use rustc_codegen_ssa::traits::{BaseTypeMethods, MiscMethods};
-use rustc_codegen_ssa::errors::InvalidMonomorphization;
+use rustc_codegen_ssa::MemFlags;
 use rustc_middle::bug;
-use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::ty::layout::LayoutOf;
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
-use rustc_span::{Span, Symbol, sym};
-use rustc_target::abi::HasDataLayout;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_span::{sym, Span, Symbol};
 use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
-use rustc_target::spec::PanicStrategy;
-#[cfg(feature="master")]
+use rustc_target::abi::HasDataLayout;
+#[cfg(feature = "master")]
 use rustc_target::spec::abi::Abi;
+use rustc_target::spec::PanicStrategy;
 
-use crate::abi::GccType;
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use crate::abi::FnAbiGccExt;
+use crate::abi::GccType;
 use crate::builder::Builder;
 use crate::common::{SignType, TypeReflection};
 use crate::context::CodegenCx;
-use crate::type_of::LayoutGccExt;
 use crate::intrinsic::simd::generic_simd_intrinsic;
+use crate::type_of::LayoutGccExt;
 
-fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
+fn get_simple_intrinsic<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    name: Symbol,
+) -> Option<Function<'gcc>> {
     let gcc_name = match name {
         sym::sqrtf32 => "sqrtf",
         sym::sqrtf64 => "sqrt",
@@ -90,7 +95,14 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) ->
 }
 
 impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) -> Result<(), Instance<'tcx>> {
+    fn codegen_intrinsic_call(
+        &mut self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        args: &[OperandRef<'tcx, RValue<'gcc>>],
+        llresult: RValue<'gcc>,
+        span: Span,
+    ) -> Result<(), Instance<'tcx>> {
         let tcx = self.tcx;
         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
 
@@ -110,268 +122,274 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
 
         let simple = get_simple_intrinsic(self, name);
-        let llval =
-            match name {
-                _ if simple.is_some() => {
-                    // FIXME(antoyo): remove this cast when the API supports function.
-                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
-                    self.call(self.type_void(), None, None, func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
-                },
-                sym::likely => {
-                    self.expect(args[0].immediate(), true)
-                }
-                sym::unlikely => {
-                    self.expect(args[0].immediate(), false)
-                }
-                sym::is_val_statically_known => {
-                    let a = args[0].immediate();
-                    let builtin = self.context.get_builtin_function("__builtin_constant_p");
-                    let res = self.context.new_call(None, builtin, &[a]);
-                    self.icmp(IntPredicate::IntEQ, res, self.const_i32(0))
-                }
-                sym::catch_unwind => {
-                    try_intrinsic(
-                        self,
-                        args[0].immediate(),
-                        args[1].immediate(),
-                        args[2].immediate(),
-                        llresult,
-                    );
-                    return Ok(());
-                }
-                sym::breakpoint => {
-                    unimplemented!();
-                }
-                sym::va_copy => {
-                    unimplemented!();
-                }
-                sym::va_arg => {
-                    unimplemented!();
-                }
+        let llval = match name {
+            _ if simple.is_some() => {
+                // FIXME(antoyo): remove this cast when the API supports function.
+                let func = unsafe { std::mem::transmute(simple.expect("simple")) };
+                self.call(
+                    self.type_void(),
+                    None,
+                    None,
+                    func,
+                    &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+                    None,
+                )
+            }
+            sym::likely => self.expect(args[0].immediate(), true),
+            sym::unlikely => self.expect(args[0].immediate(), false),
+            sym::is_val_statically_known => {
+                let a = args[0].immediate();
+                let builtin = self.context.get_builtin_function("__builtin_constant_p");
+                let res = self.context.new_call(None, builtin, &[a]);
+                self.icmp(IntPredicate::IntEQ, res, self.const_i32(0))
+            }
+            sym::catch_unwind => {
+                try_intrinsic(
+                    self,
+                    args[0].immediate(),
+                    args[1].immediate(),
+                    args[2].immediate(),
+                    llresult,
+                );
+                return Ok(());
+            }
+            sym::breakpoint => {
+                unimplemented!();
+            }
+            sym::va_copy => {
+                unimplemented!();
+            }
+            sym::va_arg => {
+                unimplemented!();
+            }
 
-                sym::volatile_load | sym::unaligned_volatile_load => {
-                    let tp_ty = fn_args.type_at(0);
-                    let ptr = args[0].immediate();
-                    let load =
-                        if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
-                            let gcc_ty = ty.gcc_type(self);
-                            self.volatile_load(gcc_ty, ptr)
+            sym::volatile_load | sym::unaligned_volatile_load => {
+                let tp_ty = fn_args.type_at(0);
+                let ptr = args[0].immediate();
+                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
+                    let gcc_ty = ty.gcc_type(self);
+                    self.volatile_load(gcc_ty, ptr)
+                } else {
+                    self.volatile_load(self.layout_of(tp_ty).gcc_type(self), ptr)
+                };
+                // TODO(antoyo): set alignment.
+                self.to_immediate(load, self.layout_of(tp_ty))
+            }
+            sym::volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::unaligned_volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.unaligned_volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::prefetch_read_data
+            | sym::prefetch_write_data
+            | sym::prefetch_read_instruction
+            | sym::prefetch_write_instruction => {
+                unimplemented!();
+            }
+            sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctpop
+            | sym::bswap
+            | sym::bitreverse
+            | sym::rotate_left
+            | sym::rotate_right
+            | sym::saturating_add
+            | sym::saturating_sub => {
+                let ty = arg_tys[0];
+                match int_type_width_signed(ty, self) {
+                    Some((width, signed)) => match name {
+                        sym::ctlz | sym::cttz => {
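+                            // The count_leading_zeroes()/count_trailing_zeroes() helpers (like
+                            // GCC's clz/ctz builtins) do not handle a zero input, while these
+                            // intrinsics must return the full bit width for zero, so branch on
+                            // zero first and only count in the non-zero case.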
+                            let func = self.current_func.borrow().expect("func");
+                            let then_block = func.new_block("then");
+                            let else_block = func.new_block("else");
+                            let after_block = func.new_block("after");
+
+                            let arg = args[0].immediate();
+                            let result = func.new_local(None, arg.get_type(), "zeros");
+                            let zero = self.cx.gcc_zero(arg.get_type());
+                            let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
+                            self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+                            let zero_result = self.cx.gcc_uint(arg.get_type(), width);
+                            then_block.add_assignment(None, result, zero_result);
+                            then_block.end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place
+                            // count_leading_zeroes() does not expect, the current block
+                            // in the state needs to be updated.
+                            self.switch_to_block(else_block);
+
+                            let zeros = match name {
+                                sym::ctlz => self.count_leading_zeroes(width, arg),
+                                sym::cttz => self.count_trailing_zeroes(width, arg),
+                                _ => unreachable!(),
+                            };
+                            self.llbb().add_assignment(None, result, zeros);
+                            self.llbb().end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place rustc does not
+                            // expect, the current block in the state needs to be updated.
+                            self.switch_to_block(after_block);
+
+                            result.to_rvalue()
                         }
-                        else {
-                            self.volatile_load(self.layout_of(tp_ty).gcc_type(self), ptr)
-                        };
-                    // TODO(antoyo): set alignment.
-                    self.to_immediate(load, self.layout_of(tp_ty))
-                }
-                sym::volatile_store => {
-                    let dst = args[0].deref(self.cx());
-                    args[1].val.volatile_store(self, dst);
-                    return Ok(());
-                }
-                sym::unaligned_volatile_store => {
-                    let dst = args[0].deref(self.cx());
-                    args[1].val.unaligned_volatile_store(self, dst);
-                    return Ok(());
-                }
-                sym::prefetch_read_data
-                    | sym::prefetch_write_data
-                    | sym::prefetch_read_instruction
-                    | sym::prefetch_write_instruction => {
-                        unimplemented!();
-                    }
-                sym::ctlz
-                    | sym::ctlz_nonzero
-                    | sym::cttz
-                    | sym::cttz_nonzero
-                    | sym::ctpop
-                    | sym::bswap
-                    | sym::bitreverse
-                    | sym::rotate_left
-                    | sym::rotate_right
-                    | sym::saturating_add
-                    | sym::saturating_sub => {
-                        let ty = arg_tys[0];
-                        match int_type_width_signed(ty, self) {
-                            Some((width, signed)) => match name {
-                                sym::ctlz | sym::cttz => {
-                                    let func = self.current_func.borrow().expect("func");
-                                    let then_block = func.new_block("then");
-                                    let else_block = func.new_block("else");
-                                    let after_block = func.new_block("after");
-
-                                    let arg = args[0].immediate();
-                                    let result = func.new_local(None, arg.get_type(), "zeros");
-                                    let zero = self.cx.gcc_zero(arg.get_type());
-                                    let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
-                                    self.llbb().end_with_conditional(None, cond, then_block, else_block);
-
-                                    let zero_result = self.cx.gcc_uint(arg.get_type(), width);
-                                    then_block.add_assignment(None, result, zero_result);
-                                    then_block.end_with_jump(None, after_block);
-
-                                    // NOTE: since jumps were added in a place
-                                    // count_leading_zeroes() does not expect, the current block
-                                    // in the state need to be updated.
-                                    self.switch_to_block(else_block);
-
-                                    let zeros =
-                                        match name {
-                                            sym::ctlz => self.count_leading_zeroes(width, arg),
-                                            sym::cttz => self.count_trailing_zeroes(width, arg),
-                                            _ => unreachable!(),
-                                        };
-                                    self.llbb().add_assignment(None, result, zeros);
-                                    self.llbb().end_with_jump(None, after_block);
-
-                                    // NOTE: since jumps were added in a place rustc does not
-                                    // expect, the current block in the state need to be updated.
-                                    self.switch_to_block(after_block);
-
-                                    result.to_rvalue()
-                                }
-                                sym::ctlz_nonzero => {
-                                    self.count_leading_zeroes(width, args[0].immediate())
-                                },
-                                sym::cttz_nonzero => {
-                                    self.count_trailing_zeroes(width, args[0].immediate())
-                                }
-                                sym::ctpop => self.pop_count(args[0].immediate()),
-                                sym::bswap => {
-                                    if width == 8 {
-                                        args[0].immediate() // byte swap a u8/i8 is just a no-op
-                                    }
-                                    else {
-                                        self.gcc_bswap(args[0].immediate(), width)
-                                    }
-                                },
-                                sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
-                                sym::rotate_left | sym::rotate_right => {
-                                    // TODO(antoyo): implement using algorithm from:
-                                    // https://blog.regehr.org/archives/1063
-                                    // for other platforms.
-                                    let is_left = name == sym::rotate_left;
-                                    let val = args[0].immediate();
-                                    let raw_shift = args[1].immediate();
-                                    if is_left {
-                                        self.rotate_left(val, raw_shift, width)
-                                    }
-                                    else {
-                                        self.rotate_right(val, raw_shift, width)
-                                    }
-                                },
-                                sym::saturating_add => {
-                                    self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
-                                },
-                                sym::saturating_sub => {
-                                    self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
-                                },
-                                _ => bug!(),
-                            },
-                            None => {
-                                tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
-                                return Ok(());
+                        sym::ctlz_nonzero => self.count_leading_zeroes(width, args[0].immediate()),
+                        sym::cttz_nonzero => self.count_trailing_zeroes(width, args[0].immediate()),
+                        sym::ctpop => self.pop_count(args[0].immediate()),
+                        sym::bswap => {
+                            if width == 8 {
+                                args[0].immediate() // byte swapping a u8/i8 is just a no-op
+                            } else {
+                                self.gcc_bswap(args[0].immediate(), width)
                             }
                         }
-                    }
-
-                sym::raw_eq => {
-                    use rustc_target::abi::Abi::*;
-                    let tp_ty = fn_args.type_at(0);
-                    let layout = self.layout_of(tp_ty).layout;
-                    let _use_integer_compare = match layout.abi() {
-                        Scalar(_) | ScalarPair(_, _) => true,
-                        Uninhabited | Vector { .. } => false,
-                        Aggregate { .. } => {
-                            // For rusty ABIs, small aggregates are actually passed
-                            // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
-                            // so we re-use that same threshold here.
-                            layout.size() <= self.data_layout().pointer_size * 2
+                        sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+                        sym::rotate_left | sym::rotate_right => {
+                            // TODO(antoyo): implement using algorithm from:
+                            // https://blog.regehr.org/archives/1063
+                            // for other platforms.
+                            let is_left = name == sym::rotate_left;
+                            let val = args[0].immediate();
+                            let raw_shift = args[1].immediate();
+                            if is_left {
+                                self.rotate_left(val, raw_shift, width)
+                            } else {
+                                self.rotate_right(val, raw_shift, width)
+                            }
                         }
-                    };
-
-                    let a = args[0].immediate();
-                    let b = args[1].immediate();
-                    if layout.size().bytes() == 0 {
-                        self.const_bool(true)
-                    }
-                    /*else if use_integer_compare {
-                        let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
-                        let ptr_ty = self.type_ptr_to(integer_ty);
-                        let a_ptr = self.bitcast(a, ptr_ty);
-                        let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
-                        let b_ptr = self.bitcast(b, ptr_ty);
-                        let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
-                        self.icmp(IntPredicate::IntEQ, a_val, b_val)
-                    }*/
-                    else {
-                        let void_ptr_type = self.context.new_type::<*const ()>();
-                        let a_ptr = self.bitcast(a, void_ptr_type);
-                        let b_ptr = self.bitcast(b, void_ptr_type);
-                        let n = self.context.new_cast(None, self.const_usize(layout.size().bytes()), self.sizet_type);
-                        let builtin = self.context.get_builtin_function("memcmp");
-                        let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
-                        self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+                        sym::saturating_add => self.saturating_add(
+                            args[0].immediate(),
+                            args[1].immediate(),
+                            signed,
+                            width,
+                        ),
+                        sym::saturating_sub => self.saturating_sub(
+                            args[0].immediate(),
+                            args[1].immediate(),
+                            signed,
+                            width,
+                        ),
+                        _ => bug!(),
+                    },
+                    None => {
+                        tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
+                            span,
+                            name,
+                            ty,
+                        });
+                        return Ok(());
                     }
                 }
+            }
 
-                sym::compare_bytes => {
-                    let a = args[0].immediate();
-                    let b = args[1].immediate();
-                    let n = args[2].immediate();
+            sym::raw_eq => {
+                use rustc_target::abi::Abi::*;
+                let tp_ty = fn_args.type_at(0);
+                let layout = self.layout_of(tp_ty).layout;
+                let _use_integer_compare = match layout.abi() {
+                    Scalar(_) | ScalarPair(_, _) => true,
+                    Uninhabited | Vector { .. } => false,
+                    Aggregate { .. } => {
+                        // For rusty ABIs, small aggregates are actually passed
+                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+                        // so we re-use that same threshold here.
+                        layout.size() <= self.data_layout().pointer_size * 2
+                    }
+                };
 
+                let a = args[0].immediate();
+                let b = args[1].immediate();
+                if layout.size().bytes() == 0 {
+                    self.const_bool(true)
+                }
+                /*else if use_integer_compare {
+                    let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+                    let ptr_ty = self.type_ptr_to(integer_ty);
+                    let a_ptr = self.bitcast(a, ptr_ty);
+                    let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+                    let b_ptr = self.bitcast(b, ptr_ty);
+                    let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
+                }*/
+                else {
                     let void_ptr_type = self.context.new_type::<*const ()>();
                     let a_ptr = self.bitcast(a, void_ptr_type);
                     let b_ptr = self.bitcast(b, void_ptr_type);
-
-                    // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                    let n = self.context.new_cast(
+                        None,
+                        self.const_usize(layout.size().bytes()),
+                        self.sizet_type,
+                    );
                     let builtin = self.context.get_builtin_function("memcmp");
                     let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
-                    self.sext(cmp, self.type_ix(32))
+                    self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
                 }
+            }
 
-                sym::black_box => {
-                    args[0].val.store(self, result);
+            sym::compare_bytes => {
+                let a = args[0].immediate();
+                let b = args[1].immediate();
+                let n = args[2].immediate();
 
-                    let block = self.llbb();
-                    let extended_asm = block.add_extended_asm(None, "");
-                    extended_asm.add_input_operand(None, "r", result.llval);
-                    extended_asm.add_clobber("memory");
-                    extended_asm.set_volatile_flag(true);
+                let void_ptr_type = self.context.new_type::<*const ()>();
+                let a_ptr = self.bitcast(a, void_ptr_type);
+                let b_ptr = self.bitcast(b, void_ptr_type);
 
-                    // We have copied the value to `result` already.
-                    return Ok(());
-                }
+                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                let builtin = self.context.get_builtin_function("memcmp");
+                let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
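+                // memcmp returns a C int; sign-extend it to the i32 that compare_bytes is
+                // declared to return.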
+                self.sext(cmp, self.type_ix(32))
+            }
 
-                sym::ptr_mask => {
-                    let usize_type = self.context.new_type::<usize>();
-                    let void_ptr_type = self.context.new_type::<*const ()>();
+            sym::black_box => {
+                args[0].val.store(self, result);
 
-                    let ptr = args[0].immediate();
-                    let mask = args[1].immediate();
+                let block = self.llbb();
+                let extended_asm = block.add_extended_asm(None, "");
+                extended_asm.add_input_operand(None, "r", result.llval);
+                extended_asm.add_clobber("memory");
+                extended_asm.set_volatile_flag(true);
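+                // The empty extended asm takes the result as an input and clobbers memory,
+                // which makes the value opaque to the optimizer without emitting any code.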
 
-                    let addr = self.bitcast(ptr, usize_type);
-                    let masked = self.and(addr, mask);
-                    self.bitcast(masked, void_ptr_type)
-                },
+                // We have copied the value to `result` already.
+                return Ok(());
+            }
 
-                _ if name_str.starts_with("simd_") => {
-                    match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
-                        Ok(llval) => llval,
-                        Err(()) => return Ok(()),
-                    }
+            sym::ptr_mask => {
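+                // Do the masking in the integer domain: reinterpret the pointer address as a
+                // usize, AND it with the mask, then cast the result back to a pointer.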
+                let usize_type = self.context.new_type::<usize>();
+                let void_ptr_type = self.context.new_type::<*const ()>();
+
+                let ptr = args[0].immediate();
+                let mask = args[1].immediate();
+
+                let addr = self.bitcast(ptr, usize_type);
+                let masked = self.and(addr, mask);
+                self.bitcast(masked, void_ptr_type)
+            }
+
+            _ if name_str.starts_with("simd_") => {
+                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                    Ok(llval) => llval,
+                    Err(()) => return Ok(()),
                 }
+            }
 
-                // Fall back to default body
-                _ => return Err(Instance::new(instance.def_id(), instance.args)),
-            };
+            // Fall back to default body
+            _ => return Err(Instance::new(instance.def_id(), instance.args)),
+        };
 
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                 let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                 let ptr = self.pointercast(result.llval, ptr_llty);
                 self.store(llval, ptr, result.align);
-            }
-            else {
+            } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
                     .store(self, result);
@@ -423,11 +441,21 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 
 impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+    fn store_fn_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, Self::Value>,
+    ) {
         arg_abi.store_fn_arg(self, idx, dst)
     }
 
-    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn store_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         arg_abi.store(self, val, dst)
     }
 
@@ -438,8 +466,18 @@ impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 
 pub trait ArgAbiExt<'gcc, 'tcx> {
     fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
-    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
-    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    );
+    fn store_fn_arg(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    );
 }
 
 impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
@@ -453,17 +491,20 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     /// place for the original Rust type of this argument/return.
     /// Can be used either for storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         if self.is_ignore() {
             return;
         }
         if self.is_sized_indirect() {
             OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
-        }
-        else if self.is_unsized_indirect() {
+        } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        }
-        else if let PassMode::Cast { ref cast, .. } = self.mode {
+        } else if let PassMode::Cast { ref cast, .. } = self.mode {
            // FIXME(eddyb): Figure out when the simpler Store is safe; clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -471,8 +512,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
                 let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                 bx.store(val, cast_dst, self.layout.align.abi);
-            }
-            else {
+            } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type.  The
                 // code that follows is the only reliable way I have
@@ -508,35 +548,44 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
 
                 bx.lifetime_end(llscratch, scratch_size);
             }
-        }
-        else {
+        } else {
             OperandValue::Immediate(val).store(bx, dst);
         }
     }
 
-    fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn store_fn_arg<'a>(
+        &self,
+        bx: &mut Builder<'a, 'gcc, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         let mut next = || {
             let val = bx.current_func().get_param(*idx as i32);
             *idx += 1;
             val.to_rvalue()
         };
         match self.mode {
-            PassMode::Ignore => {},
+            PassMode::Ignore => {}
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
-            },
+            }
             PassMode::Indirect { meta_attrs: Some(_), .. } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
-            },
-            PassMode::Direct(_) | PassMode::Indirect { meta_attrs: None, .. } | PassMode::Cast { .. } => {
+            }
+            PassMode::Direct(_)
+            | PassMode::Indirect { meta_attrs: None, .. }
+            | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
-            },
+            }
         }
     }
 }
 
-fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
+fn int_type_width_signed<'gcc, 'tcx>(
+    ty: Ty<'tcx>,
+    cx: &CodegenCx<'gcc, 'tcx>,
+) -> Option<(u64, bool)> {
     match ty.kind() {
         ty::Int(t) => Some((
             match t {
@@ -570,82 +619,76 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let typ = result_type.to_unsigned(self.cx);
 
         let value =
-            if result_type.is_signed(self.cx) {
-                self.gcc_int_cast(value, typ)
-            }
-            else {
-                value
-            };
+            if result_type.is_signed(self.cx) { self.gcc_int_cast(value, typ) } else { value };
 
         let context = &self.cx.context;
-        let result =
-            match width {
-                8 | 16 | 32 | 64 => {
-                    let mask = ((1u128 << width) - 1) as u64;
-                    let (m0, m1, m2) = if width > 16 {
-                        (
-                            context.new_rvalue_from_long(typ, (0x5555555555555555u64 & mask) as i64),
-                            context.new_rvalue_from_long(typ, (0x3333333333333333u64 & mask) as i64),
-                            context.new_rvalue_from_long(typ, (0x0f0f0f0f0f0f0f0fu64 & mask) as i64),
-                        )
-                    } else {
-                        (
-                            context.new_rvalue_from_int(typ, (0x5555u64 & mask) as i32),
-                            context.new_rvalue_from_int(typ, (0x3333u64 & mask) as i32),
-                            context.new_rvalue_from_int(typ, (0x0f0fu64 & mask) as i32),
-                        )
-                    };
-                    let one = context.new_rvalue_from_int(typ, 1);
-                    let two = context.new_rvalue_from_int(typ, 2);
-                    let four = context.new_rvalue_from_int(typ, 4);
-
-                    // First step.
-                    let left = self.lshr(value, one);
-                    let left = self.and(left, m0);
-                    let right = self.and(value, m0);
-                    let right = self.shl(right, one);
-                    let step1 = self.or(left, right);
-
-                    // Second step.
-                    let left = self.lshr(step1, two);
-                    let left = self.and(left, m1);
-                    let right = self.and(step1, m1);
-                    let right = self.shl(right, two);
-                    let step2 = self.or(left, right);
-
-                    // Third step.
-                    let left = self.lshr(step2, four);
-                    let left = self.and(left, m2);
-                    let right = self.and(step2, m2);
-                    let right = self.shl(right, four);
-                    let step3 = self.or(left, right);
-
-                    // Fourth step.
-                    if width == 8 {
-                        step3
-                    } else {
-                        self.gcc_bswap(step3, width)
-                    }
-                },
-                128 => {
-                    // TODO(antoyo): find a more efficient implementation?
-                    let sixty_four = self.gcc_int(typ, 64);
-                    let right_shift = self.gcc_lshr(value, sixty_four);
-                    let high = self.gcc_int_cast(right_shift, self.u64_type);
-                    let low = self.gcc_int_cast(value, self.u64_type);
-
-                    let reversed_high = self.bit_reverse(64, high);
-                    let reversed_low = self.bit_reverse(64, low);
-
-                    let new_low = self.gcc_int_cast(reversed_high, typ);
-                    let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
-
-                    self.gcc_or(new_low, new_high)
-                },
-                _ => {
-                    panic!("cannot bit reverse with width = {}", width);
-                },
-            };
+        let result = match width {
+            8 | 16 | 32 | 64 => {
+                let mask = ((1u128 << width) - 1) as u64;
+                let (m0, m1, m2) = if width > 16 {
+                    (
+                        context.new_rvalue_from_long(typ, (0x5555555555555555u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x3333333333333333u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x0f0f0f0f0f0f0f0fu64 & mask) as i64),
+                    )
+                } else {
+                    (
+                        context.new_rvalue_from_int(typ, (0x5555u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x3333u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x0f0fu64 & mask) as i32),
+                    )
+                };
+                let one = context.new_rvalue_from_int(typ, 1);
+                let two = context.new_rvalue_from_int(typ, 2);
+                let four = context.new_rvalue_from_int(typ, 4);
+
+                // First step.
+                let left = self.lshr(value, one);
+                let left = self.and(left, m0);
+                let right = self.and(value, m0);
+                let right = self.shl(right, one);
+                let step1 = self.or(left, right);
+
+                // Second step.
+                let left = self.lshr(step1, two);
+                let left = self.and(left, m1);
+                let right = self.and(step1, m1);
+                let right = self.shl(right, two);
+                let step2 = self.or(left, right);
+
+                // Third step.
+                let left = self.lshr(step2, four);
+                let left = self.and(left, m2);
+                let right = self.and(step2, m2);
+                let right = self.shl(right, four);
+                let step3 = self.or(left, right);
+
+                // Fourth step.
+                if width == 8 {
+                    step3
+                } else {
+                    self.gcc_bswap(step3, width)
+                }
+            }
+            128 => {
+                // TODO(antoyo): find a more efficient implementation?
+                let sixty_four = self.gcc_int(typ, 64);
+                let right_shift = self.gcc_lshr(value, sixty_four);
+                let high = self.gcc_int_cast(right_shift, self.u64_type);
+                let low = self.gcc_int_cast(value, self.u64_type);
+
+                let reversed_high = self.bit_reverse(64, high);
+                let reversed_low = self.bit_reverse(64, low);
+
+                let new_low = self.gcc_int_cast(reversed_high, typ);
+                let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
+
+                self.gcc_or(new_low, new_high, self.location)
+            }
+            _ => {
+                panic!("cannot bit reverse with width = {}", width);
+            }
+        };
 
         self.gcc_int_cast(result, result_type)
     }
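The 8/16/32/64-bit branch above is the classic mask-and-shift reversal: swap adjacent bits, then 2-bit groups, then nibbles, and finish with a byte swap (which is why the width-8 case can stop after step three). A scalar sketch for u32, checked against the library routine:

    // Illustrative scalar version of the mask-and-shift bit reversal emitted
    // above (a sketch, not the codegen itself).
    fn bit_reverse_u32(value: u32) -> u32 {
        let m0 = 0x5555_5555u32; // odd bits
        let m1 = 0x3333_3333u32; // bit pairs
        let m2 = 0x0f0f_0f0fu32; // nibbles

        let step1 = ((value >> 1) & m0) | ((value & m0) << 1);
        let step2 = ((step1 >> 2) & m1) | ((step1 & m1) << 2);
        let step3 = ((step2 >> 4) & m2) | ((step2 & m2) << 4);
        step3.swap_bytes()
    }

    fn main() {
        let x = 0b1011_0001_0000_0000_0000_0000_0000_0010u32;
        assert_eq!(bit_reverse_u32(x), x.reverse_bits());
    }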
@@ -685,56 +728,54 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 let first_elem = self.context.new_array_access(None, result, zero);
                 let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
                 self.llbb()
-                    .add_assignment(None, first_elem, first_value);
+                    .add_assignment(self.location, first_elem, first_value);
 
-                let second_elem = self.context.new_array_access(None, result, one);
-                let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
+                let second_elem = self.context.new_array_access(self.location, result, one);
+                let cast = self.gcc_int_cast(self.context.new_call(self.location, clzll, &[low]), arg_type);
                 let second_value = self.add(cast, sixty_four);
                 self.llbb()
-                    .add_assignment(None, second_elem, second_value);
+                    .add_assignment(self.location, second_elem, second_value);
 
-                let third_elem = self.context.new_array_access(None, result, two);
+                let third_elem = self.context.new_array_access(self.location, result, two);
                 let third_value = self.const_uint(arg_type, 128);
                 self.llbb()
-                    .add_assignment(None, third_elem, third_value);
+                    .add_assignment(self.location, third_elem, third_value);
 
-                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
-                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, low);
                 let not_low_and_not_high = not_low & not_high;
                 let index = not_high + not_low_and_not_high;
                 // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
                 // gcc.
                 // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
                 // compilation stage.
-                let index = self.context.new_cast(None, index, self.i32_type);
+                let index = self.context.new_cast(self.location, index, self.i32_type);
 
-                let res = self.context.new_array_access(None, result, index);
+                let res = self.context.new_array_access(self.location, result, index);
 
                 return self.gcc_int_cast(res.to_rvalue(), arg_type);
             }
             else {
                 let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
-                let arg = self.context.new_cast(None, arg, self.ulonglong_type);
+                let arg = self.context.new_cast(self.location, arg, self.ulonglong_type);
                 let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
                 let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
-                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
-                return self.context.new_cast(None, res, arg_type);
+                let res = self.context.new_call(self.location, count_leading_zeroes, &[arg]) - diff;
+                return self.context.new_cast(self.location, res, arg_type);
             };
         let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
-        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
-        self.context.new_cast(None, res, arg_type)
+        let res = self.context.new_call(self.location, count_leading_zeroes, &[arg]);
+        self.context.new_cast(self.location, res, arg_type)
     }
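The 128-bit branch above builds a three-entry table — clz(high), clz(low) + 64, and 128 — and selects from it branchlessly with an index derived from logical negations. The same selection in scalar Rust (a sketch; gcc's `__builtin_clzll` is undefined for zero, which the table layout sidesteps):

    fn clz128(high: u64, low: u64) -> u32 {
        let table = [
            high.leading_zeros(),     // used when the high half is non-zero
            low.leading_zeros() + 64, // used when only the low half is non-zero
            128,                      // the whole value is zero
        ];
        let not_high = (high == 0) as usize; // the LogicalNegate ops above
        let not_low = (low == 0) as usize;
        table[not_high + (not_low & not_high)]
    }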
 
     fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
         let result_type = arg.get_type();
-        let arg =
-            if result_type.is_signed(self.cx) {
-                let new_type = result_type.to_unsigned(self.cx);
-                self.gcc_int_cast(arg, new_type)
-            }
-            else {
-                arg
-            };
+        let arg = if result_type.is_signed(self.cx) {
+            let new_type = result_type.to_unsigned(self.cx);
+            self.gcc_int_cast(arg, new_type)
+        } else {
+            arg
+        };
         let arg_type = arg.get_type();
         let (count_trailing_zeroes, expected_type) =
             // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
@@ -766,58 +807,56 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
                 let ctzll = self.context.get_builtin_function("__builtin_ctzll");
 
-                let first_elem = self.context.new_array_access(None, result, zero);
-                let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
+                let first_elem = self.context.new_array_access(self.location, result, zero);
+                let first_value = self.gcc_int_cast(self.context.new_call(self.location, ctzll, &[low]), arg_type);
                 self.llbb()
-                    .add_assignment(None, first_elem, first_value);
+                    .add_assignment(self.location, first_elem, first_value);
 
-                let second_elem = self.context.new_array_access(None, result, one);
-                let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
+                let second_elem = self.context.new_array_access(self.location, result, one);
+                let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(self.location, ctzll, &[high]), arg_type), sixty_four);
                 self.llbb()
-                    .add_assignment(None, second_elem, second_value);
+                    .add_assignment(self.location, second_elem, second_value);
 
-                let third_elem = self.context.new_array_access(None, result, two);
+                let third_elem = self.context.new_array_access(self.location, result, two);
                 let third_value = self.gcc_int(arg_type, 128);
                 self.llbb()
-                    .add_assignment(None, third_elem, third_value);
+                    .add_assignment(self.location, third_elem, third_value);
 
-                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
-                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, high);
                 let not_low_and_not_high = not_low & not_high;
                 let index = not_low + not_low_and_not_high;
                 // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
                 // gcc.
                 // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
                 // compilation stage.
-                let index = self.context.new_cast(None, index, self.i32_type);
+                let index = self.context.new_cast(self.location, index, self.i32_type);
 
-                let res = self.context.new_array_access(None, result, index);
+                let res = self.context.new_array_access(self.location, result, index);
 
                 return self.gcc_int_cast(res.to_rvalue(), result_type);
             }
             else {
                 let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
                 let arg_size = arg_type.get_size();
-                let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
+                let casted_arg = self.context.new_cast(self.location, arg, self.ulonglong_type);
                 let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
                 let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
                 let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
-                let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
-                let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
-                let diff = diff * self.context.new_cast(None, cond, self.int_type);
-                let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
-                return self.context.new_cast(None, res, result_type);
+                let masked = mask & self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, arg_type, arg);
+                let cond = self.context.new_comparison(self.location, ComparisonOp::Equals, masked, mask);
+                let diff = diff * self.context.new_cast(self.location, cond, self.int_type);
+                let res = self.context.new_call(self.location, count_trailing_zeroes, &[casted_arg]) - diff;
+                return self.context.new_cast(self.location, res, result_type);
             };
         let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
-        let arg =
-            if arg_type != expected_type {
-                self.context.new_cast(None, arg, expected_type)
-            }
-            else {
-                arg
-            };
-        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
-        self.context.new_cast(None, res, result_type)
+        let arg = if arg_type != expected_type {
+            self.context.new_cast(self.location, arg, expected_type)
+        } else {
+            arg
+        };
+        let res = self.context.new_call(self.location, count_trailing_zeroes, &[arg]);
+        self.context.new_cast(self.location, res, result_type)
     }
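The trailing-zero count mirrors the same table-select trick with the halves swapped: ctz(low), ctz(high) + 64, and 128, indexed by `not_low + (not_low & not_high)` as in the code above. As scalar Rust:

    fn ctz128(high: u64, low: u64) -> u32 {
        let table = [
            low.trailing_zeros(),       // the low half has a set bit
            high.trailing_zeros() + 64, // only the high half has a set bit
            128,                        // the whole value is zero
        ];
        let not_low = (low == 0) as usize;
        let not_high = (high == 0) as usize;
        table[not_low + (not_low & not_high)]
    }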
 
     fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
@@ -825,13 +864,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let result_type = value.get_type();
         let value_type = result_type.to_unsigned(self.cx);
 
-        let value =
-            if result_type.is_signed(self.cx) {
-                self.gcc_int_cast(value, value_type)
-            }
-            else {
-                value
-            };
+        let value = if result_type.is_signed(self.cx) {
+            self.gcc_int_cast(value, value_type)
+        } else {
+            value
+        };
 
         // only break apart 128-bit ints if they're not natively supported
         // TODO(antoyo): remove this if/when native 128-bit integers land in libgccjit
@@ -859,8 +896,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let counter = self.current_func().new_local(None, counter_type, "popcount_counter");
         let val = self.current_func().new_local(None, value_type, "popcount_value");
         let zero = self.gcc_zero(counter_type);
-        self.llbb().add_assignment(None, counter, zero);
-        self.llbb().add_assignment(None, val, value);
+        self.llbb().add_assignment(self.location, counter, zero);
+        self.llbb().add_assignment(self.location, val, value);
         self.br(loop_head);
 
         // check if value isn't zero
@@ -874,12 +911,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let one = self.gcc_int(value_type, 1);
         let sub = self.gcc_sub(val.to_rvalue(), one);
         let op = self.gcc_and(val.to_rvalue(), sub);
-        loop_body.add_assignment(None, val, op);
+        loop_body.add_assignment(self.location, val, op);
 
         // counter += 1
         let one = self.gcc_int(counter_type, 1);
         let op = self.gcc_add(counter.to_rvalue(), one);
-        loop_body.add_assignment(None, counter, op);
+        loop_body.add_assignment(self.location, counter, op);
         self.br(loop_head);
 
         // end of loop
@@ -888,66 +925,70 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
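The loop built above is Kernighan's popcount: `v &= v - 1` clears the lowest set bit, so the body runs once per set bit instead of once per bit. A scalar sketch:

    fn pop_count(mut value: u64) -> u64 {
        let mut counter = 0u64;
        while value != 0 {
            value &= value - 1; // drop the lowest set bit
            counter += 1;
        }
        counter
    }

    fn main() {
        assert_eq!(pop_count(0b1011_0100), 4);
        assert_eq!(pop_count(u64::MAX), u64::MAX.count_ones() as u64);
    }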
 
     // Algorithm from: https://blog.regehr.org/archives/1063
-    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+    fn rotate_left(
+        &mut self,
+        value: RValue<'gcc>,
+        shift: RValue<'gcc>,
+        width: u64,
+    ) -> RValue<'gcc> {
         let max = self.const_uint(shift.get_type(), width);
         let shift = self.urem(shift, max);
         let lhs = self.shl(value, shift);
         let result_neg = self.neg(shift);
-        let result_and =
-            self.and(
-                result_neg,
-                self.const_uint(shift.get_type(), width - 1),
-            );
+        let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1));
         let rhs = self.lshr(value, result_and);
         self.or(lhs, rhs)
     }
 
     // Algorithm from: https://blog.regehr.org/archives/1063
-    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+    fn rotate_right(
+        &mut self,
+        value: RValue<'gcc>,
+        shift: RValue<'gcc>,
+        width: u64,
+    ) -> RValue<'gcc> {
         let max = self.const_uint(shift.get_type(), width);
         let shift = self.urem(shift, max);
         let lhs = self.lshr(value, shift);
         let result_neg = self.neg(shift);
-        let result_and =
-            self.and(
-                result_neg,
-                self.const_uint(shift.get_type(), width - 1),
-            );
+        let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1));
         let rhs = self.shl(value, result_and);
         self.or(lhs, rhs)
     }
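Both rotates use Regehr's UB-free pattern: reduce the shift modulo the width, then mask the complementary shift so a shift of zero never turns into a full-width (undefined) shift. A scalar form of `rotate_left`; `rotate_right` simply swaps the two shifts, as in the builder code above:

    fn rotate_left_u32(value: u32, shift: u32) -> u32 {
        let width = 32u32;
        let shift = shift % width;
        let lhs = value << shift;
        // (-shift) & (width - 1) is in 0..width, so the shift is always defined.
        let rhs = value >> (shift.wrapping_neg() & (width - 1));
        lhs | rhs
    }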
 
-    fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+    fn saturating_add(
+        &mut self,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+        signed: bool,
+        width: u64,
+    ) -> RValue<'gcc> {
         let result_type = lhs.get_type();
         if signed {
             // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
             let func = self.current_func.borrow().expect("func");
-            let res = func.new_local(None, result_type, "saturating_sum");
+            let res = func.new_local(self.location, result_type, "saturating_sum");
             let supports_native_type = self.is_native_int_type(result_type);
-            let overflow =
-                if supports_native_type {
-                    let func_name =
-                        match width {
-                            8 => "__builtin_add_overflow",
-                            16 => "__builtin_add_overflow",
-                            32 => "__builtin_sadd_overflow",
-                            64 => "__builtin_saddll_overflow",
-                            128 => "__builtin_add_overflow",
-                            _ => unreachable!(),
-                        };
-                    let overflow_func = self.context.get_builtin_function(func_name);
-                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
-                }
-                else {
-                    let func_name =
-                        match width {
-                            128 => "__rust_i128_addo",
-                            _ => unreachable!(),
-                        };
-                    let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
-                    self.llbb().add_assignment(None, res, int_result);
-                    overflow
+            let overflow = if supports_native_type {
+                let func_name = match width {
+                    8 => "__builtin_add_overflow",
+                    16 => "__builtin_add_overflow",
+                    32 => "__builtin_sadd_overflow",
+                    64 => "__builtin_saddll_overflow",
+                    128 => "__builtin_add_overflow",
+                    _ => unreachable!(),
+                };
+                let overflow_func = self.context.get_builtin_function(func_name);
+                self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.location)], None)
+            } else {
+                let func_name = match width {
+                    128 => "__rust_i128_addo",
+                    _ => unreachable!(),
                 };
+                let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
+                self.llbb().add_assignment(self.location, res, int_result);
+                overflow
+            };
 
             let then_block = func.new_block("then");
             let after_block = func.new_block("after");
@@ -955,83 +996,97 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // Return `result_type`'s maximum or minimum value on overflow
             // NOTE: convert the type to unsigned to have an unsigned shift.
             let unsigned_type = result_type.to_unsigned(&self.cx);
-            let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+            let shifted = self.gcc_lshr(
+                self.gcc_int_cast(lhs, unsigned_type),
+                self.gcc_int(unsigned_type, width as i64 - 1),
+            );
             let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
             let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
-            then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(
+                self.location,
+                res,
+                self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type),
+            );
+            then_block.end_with_jump(self.location, after_block);
 
-            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+            self.llbb().end_with_conditional(self.location, overflow, then_block, after_block);
 
             // NOTE: since jumps were added in a place rustc does not
             // expect, the current block in the state needs to be updated.
             self.switch_to_block(after_block);
 
             res.to_rvalue()
-        }
-        else {
+        } else {
             // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
             let res = self.gcc_add(lhs, rhs);
             let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs);
             let value = self.gcc_neg(self.gcc_int_cast(cond, result_type));
-            self.gcc_or(res, value)
+            self.gcc_or(res, value, self.location)
         }
     }
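Scalar sketches of the two saturating-add paths above, for 32-bit operands. The signed path mirrors the overflow-builtin-plus-select: on overflow the result is `i32::MAX` when `lhs` was non-negative and `i32::MIN` when it was negative. The unsigned path is the locklessinc trick: a wrapped sum is smaller than either operand, so OR in an all-ones mask built from that comparison.

    fn saturating_add_i32(lhs: i32, rhs: i32) -> i32 {
        let (res, overflowed) = lhs.overflowing_add(rhs);
        if overflowed {
            // ((lhs as u32) >> 31) is 0 or 1; adding it to i32::MAX wraps to
            // i32::MIN exactly when lhs was negative.
            (((lhs as u32) >> 31) as i32).wrapping_add(i32::MAX)
        } else {
            res
        }
    }

    fn saturating_add_u32(lhs: u32, rhs: u32) -> u32 {
        let res = lhs.wrapping_add(rhs);
        res | ((res < lhs) as u32).wrapping_neg()
    }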
 
     // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
-    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+    fn saturating_sub(
+        &mut self,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+        signed: bool,
+        width: u64,
+    ) -> RValue<'gcc> {
         let result_type = lhs.get_type();
         if signed {
             // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
             let func = self.current_func.borrow().expect("func");
-            let res = func.new_local(None, result_type, "saturating_diff");
+            let res = func.new_local(self.location, result_type, "saturating_diff");
             let supports_native_type = self.is_native_int_type(result_type);
-            let overflow =
-                if supports_native_type {
-                    let func_name =
-                        match width {
-                            8 => "__builtin_sub_overflow",
-                            16 => "__builtin_sub_overflow",
-                            32 => "__builtin_ssub_overflow",
-                            64 => "__builtin_ssubll_overflow",
-                            128 => "__builtin_sub_overflow",
-                            _ => unreachable!(),
-                        };
-                    let overflow_func = self.context.get_builtin_function(func_name);
-                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
-                }
-                else {
-                    let func_name =
-                        match width {
-                            128 => "__rust_i128_subo",
-                            _ => unreachable!(),
-                        };
-                    let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
-                    self.llbb().add_assignment(None, res, int_result);
-                    overflow
+            let overflow = if supports_native_type {
+                let func_name = match width {
+                    8 => "__builtin_sub_overflow",
+                    16 => "__builtin_sub_overflow",
+                    32 => "__builtin_ssub_overflow",
+                    64 => "__builtin_ssubll_overflow",
+                    128 => "__builtin_sub_overflow",
+                    _ => unreachable!(),
                 };
+                let overflow_func = self.context.get_builtin_function(func_name);
+                self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.location)], None)
+            } else {
+                let func_name = match width {
+                    128 => "__rust_i128_subo",
+                    _ => unreachable!(),
+                };
+                let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
+                self.llbb().add_assignment(self.location, res, int_result);
+                overflow
+            };
 
             let then_block = func.new_block("then");
             let after_block = func.new_block("after");
 
             // Return `result_type`'s maximum or minimum value on overflow
             // NOTE: convert the type to unsigned to have an unsigned shift.
-            let unsigned_type = result_type.to_unsigned(&self.cx);
-            let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+            let unsigned_type = result_type.to_unsigned(self.cx);
+            let shifted = self.gcc_lshr(
+                self.gcc_int_cast(lhs, unsigned_type),
+                self.gcc_int(unsigned_type, width as i64 - 1),
+            );
             let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
             let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
-            then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(
+                self.location,
+                res,
+                self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type),
+            );
+            then_block.end_with_jump(self.location, after_block);
 
-            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+            self.llbb().end_with_conditional(self.location, overflow, then_block, after_block);
 
             // NOTE: since jumps were added in a place rustc does not
             // expect, the current block in the state needs to be updated.
             self.switch_to_block(after_block);
 
             res.to_rvalue()
-        }
-        else {
+        } else {
             let res = self.gcc_sub(lhs, rhs);
             let comparison = self.gcc_icmp(IntPredicate::IntULE, res, lhs);
             let value = self.gcc_neg(self.gcc_int_cast(comparison, result_type));
@@ -1040,21 +1095,26 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
 }
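The unsigned `saturating_sub` branch above, as scalar Rust: if the subtraction borrowed, `res <= lhs` is false and the mask clamps the result to zero.

    fn saturating_sub_u32(lhs: u32, rhs: u32) -> u32 {
        let res = lhs.wrapping_sub(rhs);
        res & ((res <= lhs) as u32).wrapping_neg()
    }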
 
-fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(
+    bx: &'b mut Builder<'a, 'gcc, 'tcx>,
+    try_func: RValue<'gcc>,
+    data: RValue<'gcc>,
+    _catch_func: RValue<'gcc>,
+    dest: RValue<'gcc>,
+) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
         bx.call(bx.type_void(), None, None, try_func, &[data], None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
         let ret_align = bx.tcx.data_layout.i32_align.abi;
         bx.store(bx.const_i32(0), dest, ret_align);
-    }
-    else if wants_msvc_seh(bx.sess()) {
-        unimplemented!();
-    }
-    else {
-        #[cfg(feature="master")]
+    } else {
+        if wants_msvc_seh(bx.sess()) {
+            unimplemented!();
+        }
+        #[cfg(feature = "master")]
         codegen_gnu_try(bx, try_func, data, _catch_func, dest);
-        #[cfg(not(feature="master"))]
+        #[cfg(not(feature = "master"))]
         unimplemented!();
     }
 }
@@ -1070,8 +1130,14 @@ fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_fu
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-#[cfg(feature="master")]
-fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>, data: RValue<'gcc>, catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+#[cfg(feature = "master")]
+fn codegen_gnu_try<'gcc>(
+    bx: &mut Builder<'_, 'gcc, '_>,
+    try_func: RValue<'gcc>,
+    data: RValue<'gcc>,
+    catch_func: RValue<'gcc>,
+    dest: RValue<'gcc>,
+) {
     let cx: &CodegenCx<'gcc, '_> = bx.cx;
     let (llty, func) = get_rust_try_fn(cx, &mut |mut bx| {
         // Codegens the shims described above:
@@ -1095,7 +1161,7 @@ fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>,
         let catch_func = func.get_param(2).to_rvalue();
         let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
 
-        let current_block = bx.block.clone();
+        let current_block = bx.block;
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -1130,36 +1196,44 @@ fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>,
     bx.store(ret, dest, i32_align);
 }
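A sketch of the semantics the generated shim implements, expressed with `catch_unwind` (the shim itself is built from gcc try/catch blocks, not this library call): run `try_func(data)` and return 0, or run `catch_func` on the way out of a panic and return 1. The null pointer stands in for the exception object the real shim would pass.

    fn rust_try_sketch(
        try_func: fn(*mut i8),
        data: *mut i8,
        catch_func: fn(*mut i8, *mut i8),
    ) -> i32 {
        match std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| try_func(data))) {
            Ok(()) => 0,
            Err(_payload) => {
                catch_func(data, std::ptr::null_mut());
                1
            }
        }
    }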
 
-
 // Helper function used to get a handle to the `__rust_try` function used to
 // catch exceptions.
 //
 // This function is only generated once and is then cached.
-#[cfg(feature="master")]
-fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) {
+#[cfg(feature = "master")]
+fn get_rust_try_fn<'a, 'gcc, 'tcx>(
+    cx: &'a CodegenCx<'gcc, 'tcx>,
+    codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>),
+) -> (Type<'gcc>, Function<'gcc>) {
     if let Some(llfn) = cx.rust_try_fn.get() {
         return llfn;
     }
 
     // Define the type up front for the signature of the rust_try function.
     let tcx = cx.tcx;
-    let i8p = Ty::new_mut_ptr(tcx,tcx.types.i8);
+    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
     // `unsafe fn(*mut i8) -> ()`
-    let try_fn_ty = Ty::new_fn_ptr(tcx,ty::Binder::dummy(tcx.mk_fn_sig(
-        iter::once(i8p),
-        Ty::new_unit(tcx,),
-        false,
-        rustc_hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let try_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            iter::once(i8p),
+            Ty::new_unit(tcx),
+            false,
+            rustc_hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
     // `unsafe fn(*mut i8, *mut i8) -> ()`
-    let catch_fn_ty = Ty::new_fn_ptr(tcx,ty::Binder::dummy(tcx.mk_fn_sig(
-        [i8p, i8p].iter().cloned(),
-        Ty::new_unit(tcx,),
-        false,
-        rustc_hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let catch_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p, i8p].iter().cloned(),
+            Ty::new_unit(tcx),
+            false,
+            rustc_hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
     // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
     let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
         [try_fn_ty, i8p, catch_fn_ty],
@@ -1175,8 +1249,13 @@ fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut
 
 // Helper function to give a Block to a closure to codegen a shim function.
 // This is currently primarily used for the `try` intrinsic functions above.
-#[cfg(feature="master")]
-fn gen_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, name: &str, rust_fn_sig: ty::PolyFnSig<'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) {
+#[cfg(feature = "master")]
+fn gen_fn<'a, 'gcc, 'tcx>(
+    cx: &'a CodegenCx<'gcc, 'tcx>,
+    name: &str,
+    rust_fn_sig: ty::PolyFnSig<'tcx>,
+    codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>),
+) -> (Type<'gcc>, Function<'gcc>) {
     let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
     let return_type = fn_abi.gcc_type(cx).return_type;
     // FIXME(eddyb) find a nicer way to do this.
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
index d8091724d86..e9af34059a0 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs
@@ -1,3 +1,5 @@
+use std::iter::FromIterator;
+
 use gccjit::ToRValue;
 use gccjit::{BinaryOp, RValue, Type};
 #[cfg(feature = "master")]
@@ -19,6 +21,8 @@ use rustc_span::{sym, Span, Symbol};
 use rustc_target::abi::Align;
 
 use crate::builder::Builder;
+#[cfg(not(feature = "master"))]
+use crate::common::SignType;
 #[cfg(feature = "master")]
 use crate::context::CodegenCx;
 
@@ -156,6 +160,197 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         return Ok(compare_simd_types(bx, arg1, arg2, in_elem, llret_ty, cmp_op));
     }
 
+    let simd_bswap = |bx: &mut Builder<'a, 'gcc, 'tcx>, vector: RValue<'gcc>| -> RValue<'gcc> {
+        let v_type = vector.get_type();
+        let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
+        let elem_type = vector_type.get_element_type();
+        let elem_size_bytes = elem_type.get_size();
+        if elem_size_bytes == 1 {
+            return vector;
+        }
+
+        let type_size_bytes = elem_size_bytes as u64 * in_len;
+        let shuffle_indices = Vec::from_iter(0..type_size_bytes);
+        let byte_vector_type = bx.context.new_vector_type(bx.type_u8(), type_size_bytes);
+        let byte_vector = bx.context.new_bitcast(None, args[0].immediate(), byte_vector_type);
+
+        #[cfg(not(feature = "master"))]
+        let shuffled = {
+            let new_elements: Vec<_> = shuffle_indices
+                .chunks_exact(elem_size_bytes as _)
+                .flat_map(|x| x.iter().rev())
+                .map(|&i| {
+                    let index = bx.context.new_rvalue_from_long(bx.u64_type, i as _);
+                    bx.extract_element(byte_vector, index)
+                })
+                .collect();
+
+            bx.context.new_rvalue_from_vector(None, byte_vector_type, &new_elements)
+        };
+        #[cfg(feature = "master")]
+        let shuffled = {
+            let indices: Vec<_> = shuffle_indices
+                .chunks_exact(elem_size_bytes as _)
+                .flat_map(|x| x.iter().rev())
+                .map(|&i| bx.context.new_rvalue_from_int(bx.u8_type, i as _))
+                .collect();
+
+            let mask = bx.context.new_rvalue_from_vector(None, byte_vector_type, &indices);
+            bx.context.new_rvalue_vector_perm(None, byte_vector, byte_vector, mask)
+        };
+        bx.context.new_bitcast(None, shuffled, v_type)
+    };
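`simd_bswap` above reverses the bytes within each element by shuffling the whole vector's bytes: the index list 0..size is chunked per element and each chunk reversed. A scalar sketch of the same permutation for a u32x4 (hypothetical helper, std only; each lane ends up equal to `x.swap_bytes()`):

    fn simd_bswap_u32x4(v: [u32; 4]) -> [u32; 4] {
        let mut bytes: Vec<u8> = v.iter().flat_map(|x| x.to_ne_bytes()).collect();
        for chunk in bytes.chunks_exact_mut(4) {
            chunk.reverse(); // the same permutation the shuffle mask encodes
        }
        let mut out = [0u32; 4];
        for (o, chunk) in out.iter_mut().zip(bytes.chunks_exact(4)) {
            *o = u32::from_ne_bytes(chunk.try_into().unwrap());
        }
        out
    }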
+
+    if name == sym::simd_bswap || name == sym::simd_bitreverse {
+        require!(
+            bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
+            InvalidMonomorphization::UnsupportedOperation { span, name, in_ty, in_elem }
+        );
+    }
+
+    if name == sym::simd_bswap {
+        return Ok(simd_bswap(bx, args[0].immediate()));
+    }
+
+    // We use a different algorithm from non-vector bitreverse to take advantage of most
+    // processors' vector shuffle units.  It works like this:
+    // 1. Generate pre-reversed low and high nibbles as a vector.
+    // 2. Byte-swap the input.
+    // 3. Mask off the low and high nibbles of each byte in the byte-swapped input.
+    // 4. Shuffle the pre-reversed low and high nibbles using the masked nibbles as a shuffle mask.
+    // 5. Combine the results of the shuffle back together and cast back to the original type.
+    #[cfg(feature = "master")]
+    if name == sym::simd_bitreverse {
+        let vector = args[0].immediate();
+        let v_type = vector.get_type();
+        let vector_type = v_type.unqualified().dyncast_vector().expect("vector type");
+        let elem_type = vector_type.get_element_type();
+        let elem_size_bytes = elem_type.get_size();
+
+        let type_size_bytes = elem_size_bytes as u64 * in_len;
+        // We need to ensure at least 16 entries in our vector type, since the pre-reversed vectors
+        // we generate below have 16 entries in them.  `new_rvalue_vector_perm` requires the mask
+        // vector to be of the same length as the source vectors.
+        let byte_vector_type_size = type_size_bytes.max(16);
+
+        let byte_vector_type = bx.context.new_vector_type(bx.u8_type, type_size_bytes);
+        let long_byte_vector_type = bx.context.new_vector_type(bx.u8_type, byte_vector_type_size);
+
+        // Step 1: Generate pre-reversed low and high nibbles as a vector.
+        let zero_byte = bx.context.new_rvalue_zero(bx.u8_type);
+        let hi_nibble_elements: Vec<_> = (0u8..16)
+            .map(|x| bx.context.new_rvalue_from_int(bx.u8_type, x.reverse_bits() as _))
+            .chain((16..byte_vector_type_size).map(|_| zero_byte))
+            .collect();
+        let hi_nibble =
+            bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &hi_nibble_elements);
+
+        let lo_nibble_elements: Vec<_> = (0u8..16)
+            .map(|x| bx.context.new_rvalue_from_int(bx.u8_type, (x.reverse_bits() >> 4) as _))
+            .chain((16..byte_vector_type_size).map(|_| zero_byte))
+            .collect();
+        let lo_nibble =
+            bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &lo_nibble_elements);
+
+        let mask = bx.context.new_rvalue_from_vector(
+            None,
+            long_byte_vector_type,
+            &vec![bx.context.new_rvalue_from_int(bx.u8_type, 0x0f); byte_vector_type_size as _],
+        );
+
+        let four_vec = bx.context.new_rvalue_from_vector(
+            None,
+            long_byte_vector_type,
+            &vec![bx.context.new_rvalue_from_int(bx.u8_type, 4); byte_vector_type_size as _],
+        );
+
+        // Step 2: Byte-swap the input.
+        let swapped = simd_bswap(bx, args[0].immediate());
+        let byte_vector = bx.context.new_bitcast(None, swapped, byte_vector_type);
+
+        // We're going to need to extend the vector with zeros to make sure that the types are the
+        // same, since that's what new_rvalue_vector_perm expects.
+        let byte_vector = if byte_vector_type_size > type_size_bytes {
+            let mut byte_vector_elements = Vec::with_capacity(byte_vector_type_size as _);
+            for i in 0..type_size_bytes {
+                let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
+                let val = bx.extract_element(byte_vector, idx);
+                byte_vector_elements.push(val);
+            }
+            for _ in type_size_bytes..byte_vector_type_size {
+                byte_vector_elements.push(zero_byte);
+            }
+            bx.context.new_rvalue_from_vector(None, long_byte_vector_type, &byte_vector_elements)
+        } else {
+            bx.context.new_bitcast(None, byte_vector, long_byte_vector_type)
+        };
+
+        // Step 3: Mask off the low and high nibbles of each byte in the byte-swapped input.
+        let masked_hi = (byte_vector >> four_vec) & mask;
+        let masked_lo = byte_vector & mask;
+
+        // Step 4: Shuffle the pre-reversed low and high nibbles using the masked nibbles as a shuffle mask.
+        let hi = bx.context.new_rvalue_vector_perm(None, hi_nibble, hi_nibble, masked_lo);
+        let lo = bx.context.new_rvalue_vector_perm(None, lo_nibble, lo_nibble, masked_hi);
+
+        // Step 5: Combine the results of the shuffle back together and cast back to the original type.
+        let result = hi | lo;
+        let cast_ty =
+            bx.context.new_vector_type(elem_type, byte_vector_type_size / (elem_size_bytes as u64));
+
+        // We might need to truncate if sizeof(v_type) < sizeof(cast_ty).
+        if type_size_bytes < byte_vector_type_size {
+            let cast_result = bx.context.new_bitcast(None, result, cast_ty);
+            let elems: Vec<_> = (0..in_len)
+                .map(|i| {
+                    let idx = bx.context.new_rvalue_from_int(bx.u32_type, i as _);
+                    bx.extract_element(cast_result, idx)
+                })
+                .collect();
+            return Ok(bx.context.new_rvalue_from_vector(None, v_type, &elems));
+        } else {
+            // avoid the unnecessary truncation as an optimization.
+            return Ok(bx.context.new_bitcast(None, result, v_type));
+        }
+    }
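A scalar model of the nibble shuffle above (steps 1 and 3–5; step 2's byte swap handles cross-byte order, leaving only within-byte reversal): each byte's low nibble, bit-reversed, becomes the high half, and its high nibble, bit-reversed, becomes the low half. A sketch using the same 16-entry tables the codegen materialises as vectors:

    fn bitreverse_byte(b: u8) -> u8 {
        // Entry x is nibble x bit-reversed into the high (resp. low) half.
        let hi_table: [u8; 16] = std::array::from_fn(|x| (x as u8).reverse_bits());
        let lo_table: [u8; 16] = std::array::from_fn(|x| (x as u8).reverse_bits() >> 4);
        hi_table[(b & 0x0f) as usize] | lo_table[(b >> 4) as usize]
    }

    fn main() {
        for b in 0u8..=255 {
            assert_eq!(bitreverse_byte(b), b.reverse_bits());
        }
    }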
+    // Since gcc doesn't have vector shuffle methods available in non-patched builds, fall back
+    // to component-wise bitreverses.
+    #[cfg(not(feature = "master"))]
+    if name == sym::simd_bitreverse {
+        let vector = args[0].immediate();
+        let vector_ty = vector.get_type();
+        let vector_type = vector_ty.unqualified().dyncast_vector().expect("vector type");
+        let num_elements = vector_type.get_num_units();
+
+        let elem_type = vector_type.get_element_type();
+        let elem_size_bytes = elem_type.get_size();
+        let num_type = elem_type.to_unsigned(bx.cx);
+        let new_elements: Vec<_> = (0..num_elements)
+            .map(|idx| {
+                let index = bx.context.new_rvalue_from_long(num_type, idx as _);
+                let extracted_value = bx.extract_element(vector, index).to_rvalue();
+                bx.bit_reverse(elem_size_bytes as u64 * 8, extracted_value)
+            })
+            .collect();
+        return Ok(bx.context.new_rvalue_from_vector(None, vector_ty, &new_elements));
+    }
+
+    if name == sym::simd_ctlz || name == sym::simd_cttz {
+        let vector = args[0].immediate();
+        let elements: Vec<_> = (0..in_len)
+            .map(|i| {
+                let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
+                let value = bx.extract_element(vector, index).to_rvalue();
+                if name == sym::simd_ctlz {
+                    bx.count_leading_zeroes(value.get_type().get_size() as u64 * 8, value)
+                } else {
+                    bx.count_trailing_zeroes(value.get_type().get_size() as u64 * 8, value)
+                }
+            })
+            .collect();
+        return Ok(bx.context.new_rvalue_from_vector(None, vector.get_type(), &elements));
+    }
+
     if name == sym::simd_shuffle {
         // Make sure this is actually an array, since typeck only checks the length-suffixed
         // version of this intrinsic.
@@ -504,20 +699,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
         default: RValue<'gcc>,
         pointers: RValue<'gcc>,
         mask: RValue<'gcc>,
-        pointer_count: usize,
         bx: &mut Builder<'a, 'gcc, 'tcx>,
         in_len: u64,
-        underlying_ty: Ty<'tcx>,
         invert: bool,
     ) -> RValue<'gcc> {
-        let vector_type = if pointer_count > 1 {
-            bx.context.new_vector_type(bx.usize_type, in_len)
-        } else {
-            vector_ty(bx, underlying_ty, in_len)
-        };
-        let elem_type = vector_type.dyncast_vector().expect("vector type").get_element_type();
+        let vector_type = default.get_type();
+        let elem_type =
+            vector_type.unqualified().dyncast_vector().expect("vector type").get_element_type();
 
-        let mut values = vec![];
+        let mut values = Vec::with_capacity(in_len as usize);
         for i in 0..in_len {
             let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
             let int = bx.context.new_vector_access(None, pointers, index).to_rvalue();
@@ -530,13 +720,15 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
 
         let vector = bx.context.new_rvalue_from_vector(None, vector_type, &values);
 
-        let mut mask_types = vec![];
-        let mut mask_values = vec![];
+        let mut mask_types = Vec::with_capacity(in_len as usize);
+        let mut mask_values = Vec::with_capacity(in_len as usize);
         for i in 0..in_len {
             let index = bx.context.new_rvalue_from_long(bx.i32_type, i as i64);
             mask_types.push(bx.context.new_field(None, bx.i32_type, "m"));
             let mask_value = bx.context.new_vector_access(None, mask, index).to_rvalue();
-            let masked = bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value;
+            let mask_value_cast = bx.context.new_cast(None, mask_value, bx.i32_type);
+            let masked =
+                bx.context.new_rvalue_from_int(bx.i32_type, in_len as i32) & mask_value_cast;
             let value = index + masked;
             mask_values.push(value);
         }
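The mask handling above turns each mask lane into a shuffle index: `i + (in_len & mask_lane)` stays at `i` when the lane is 0 and jumps by `in_len` when the lane is all ones (-1). Assuming the surrounding shuffle (outside this hunk) operates on the concatenation of two source vectors, the selection behaves like this hypothetical scalar blend:

    fn blend_by_mask(a: &[i64], b: &[i64], mask: &[i32]) -> Vec<i64> {
        let n = a.len() as i32;
        mask.iter()
            .enumerate()
            .map(|(i, &m)| {
                // m is 0 (keep the lane from `a`) or -1 (all ones: n & -1 == n,
                // so the index jumps into `b`).
                let idx = i as i32 + (n & m);
                if idx < n { a[idx as usize] } else { b[(idx - n) as usize] }
            })
            .collect()
    }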
@@ -665,10 +857,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
             args[0].immediate(),
             args[1].immediate(),
             args[2].immediate(),
-            pointer_count,
             bx,
             in_len,
-            underlying_ty,
             false,
         ));
     }
@@ -779,16 +969,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>(
             }
         }
 
-        let result = gather(
-            args[0].immediate(),
-            args[1].immediate(),
-            args[2].immediate(),
-            pointer_count,
-            bx,
-            in_len,
-            underlying_ty,
-            true,
-        );
+        let result =
+            gather(args[0].immediate(), args[1].immediate(), args[2].immediate(), bx, in_len, true);
 
         let pointers = args[1].immediate();
 
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 09ce059476e..a4ee3015b8d 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -4,6 +4,7 @@
  * TODO(antoyo): support LTO (gcc's equivalent to Full LTO is -flto -flto-partition=one — https://documentation.suse.com/sbp/all/html/SBP-GCC-10/index.html).
  * For Thin LTO, this might be helpful:
  * In gcc 4.6 -fwhopr was removed and became default with -flto. The non-whopr path can still be executed via -flto-partition=none.
+ * Or the new incremental LTO?
  *
  * Maybe some missing optimizations enabled by rustc's LTO are in there: https://gcc.gnu.org/onlinedocs/gcc/Optimize-Options.html
  * Like -fipa-icf (should be already enabled) and maybe -fdevirtualize-at-ltrans.
@@ -24,9 +25,10 @@
     hash_raw_entry
 )]
 #![allow(broken_intra_doc_links)]
-#![recursion_limit="256"]
+#![recursion_limit = "256"]
 #![warn(rust_2018_idioms)]
 #![warn(unused_lifetimes)]
+#![deny(clippy::pattern_type_mismatch)]
 
 extern crate rustc_apfloat;
 extern crate rustc_ast;
@@ -37,7 +39,8 @@ extern crate rustc_errors;
 extern crate rustc_fluent_macro;
 extern crate rustc_fs_util;
 extern crate rustc_hir;
-#[cfg(feature="master")]
+extern crate rustc_index;
+#[cfg(feature = "master")]
 extern crate rustc_interface;
 extern crate rustc_macros;
 extern crate rustc_metadata;
@@ -77,36 +80,40 @@ mod type_of;
 
 use std::any::Any;
 use std::fmt::Debug;
-use std::sync::Arc;
-use std::sync::Mutex;
-#[cfg(not(feature="master"))]
+#[cfg(not(feature = "master"))]
 use std::sync::atomic::AtomicBool;
-#[cfg(not(feature="master"))]
+#[cfg(not(feature = "master"))]
 use std::sync::atomic::Ordering;
+use std::sync::Arc;
+use std::sync::Mutex;
 
+use errors::LTONotSupported;
+#[cfg(not(feature = "master"))]
+use gccjit::CType;
 use gccjit::{Context, OptimizationLevel};
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::{TargetInfo, Version};
-#[cfg(not(feature="master"))]
-use gccjit::CType;
-use errors::LTONotSupported;
 use rustc_ast::expand::allocator::AllocatorKind;
-use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
-use rustc_codegen_ssa::base::codegen_crate;
-use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn};
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
+use rustc_codegen_ssa::back::write::{
+    CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryFn,
+};
+use rustc_codegen_ssa::base::codegen_crate;
+use rustc_codegen_ssa::traits::{
+    CodegenBackend, ExtraBackendMethods, ThinBufferMethods, WriteBackendMethods,
+};
+use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen};
 use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::sync::IntoDynSyncSend;
-use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ThinBufferMethods, WriteBackendMethods};
-use rustc_errors::{ErrorGuaranteed, DiagCtxt};
+use rustc_errors::{DiagCtxt, ErrorGuaranteed};
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::util::Providers;
 use rustc_middle::ty::TyCtxt;
+use rustc_middle::util::Providers;
 use rustc_session::config::{Lto, OptLevel, OutputFilenames};
 use rustc_session::Session;
-use rustc_span::Symbol;
 use rustc_span::fatal_error::FatalError;
+use rustc_span::Symbol;
 use tempfile::TempDir;
 
 use crate::back::lto::ModuleBuffer;
@@ -124,13 +131,13 @@ impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
     }
 }
 
-#[cfg(not(feature="master"))]
+#[cfg(not(feature = "master"))]
 #[derive(Debug)]
 pub struct TargetInfo {
     supports_128bit_integers: AtomicBool,
 }
 
-#[cfg(not(feature="master"))]
+#[cfg(not(feature = "master"))]
 impl TargetInfo {
     fn cpu_supports(&self, _feature: &str) -> bool {
         false
@@ -173,26 +180,26 @@ impl CodegenBackend for GccCodegenBackend {
     }
 
     fn init(&self, sess: &Session) {
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         {
             let target_cpu = target_cpu(sess);
 
             // Get the second TargetInfo with the correct CPU features by setting the arch.
             let context = Context::default();
             if target_cpu != "generic" {
-                context.add_command_line_option(&format!("-march={}", target_cpu));
+                context.add_command_line_option(format!("-march={}", target_cpu));
             }
 
             **self.target_info.info.lock().expect("lock") = context.get_target_info();
         }
 
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         gccjit::set_global_personality_function_name(b"rust_eh_personality\0");
         if sess.lto() == Lto::Thin {
             sess.dcx().emit_warn(LTONotSupported {});
         }
 
-        #[cfg(not(feature="master"))]
+        #[cfg(not(feature = "master"))]
         {
             let temp_dir = TempDir::new().expect("cannot create temporary directory");
             let temp_file = temp_dir.into_path().join("result.asm");
@@ -200,39 +207,62 @@ impl CodegenBackend for GccCodegenBackend {
             check_context.set_print_errors_to_stderr(false);
             let _int128_ty = check_context.new_c_type(CType::UInt128t);
             // NOTE: we cannot just call compile() as this would require files other than libgccjit.so.
-            check_context.compile_to_file(gccjit::OutputKind::Assembler, temp_file.to_str().expect("path to str"));
-            self.target_info.info.lock().expect("lock").supports_128bit_integers.store(check_context.get_last_error() == Ok(None), Ordering::SeqCst);
+            check_context.compile_to_file(
+                gccjit::OutputKind::Assembler,
+                temp_file.to_str().expect("path to str"),
+            );
+            self.target_info
+                .info
+                .lock()
+                .expect("lock")
+                .supports_128bit_integers
+                .store(check_context.get_last_error() == Ok(None), Ordering::SeqCst);
         }
     }
 
     fn provide(&self, providers: &mut Providers) {
-        providers.global_backend_features =
-            |tcx, ()| gcc_util::global_gcc_features(tcx.sess, true)
+        providers.global_backend_features = |tcx, ()| gcc_util::global_gcc_features(tcx.sess, true)
     }
 
-    fn codegen_crate<'tcx>(&self, tcx: TyCtxt<'tcx>, metadata: EncodedMetadata, need_metadata_module: bool) -> Box<dyn Any> {
+    fn codegen_crate(
+        &self,
+        tcx: TyCtxt<'_>,
+        metadata: EncodedMetadata,
+        need_metadata_module: bool,
+    ) -> Box<dyn Any> {
         let target_cpu = target_cpu(tcx.sess);
-        let res = codegen_crate(self.clone(), tcx, target_cpu.to_string(), metadata, need_metadata_module);
+        let res = codegen_crate(
+            self.clone(),
+            tcx,
+            target_cpu.to_string(),
+            metadata,
+            need_metadata_module,
+        );
 
         Box::new(res)
     }
 
-    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
+    fn join_codegen(
+        &self,
+        ongoing_codegen: Box<dyn Any>,
+        sess: &Session,
+        _outputs: &OutputFilenames,
+    ) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
         ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
             .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
             .join(sess)
     }
 
-    fn link(&self, sess: &Session, codegen_results: CodegenResults, outputs: &OutputFilenames) -> Result<(), ErrorGuaranteed> {
+    fn link(
+        &self,
+        sess: &Session,
+        codegen_results: CodegenResults,
+        outputs: &OutputFilenames,
+    ) -> Result<(), ErrorGuaranteed> {
         use rustc_codegen_ssa::back::link::link_binary;
 
-        link_binary(
-            sess,
-            &crate::archive::ArArchiveBuilderBuilder,
-            &codegen_results,
-            outputs,
-        )
+        link_binary(sess, &crate::archive::ArArchiveBuilderBuilder, &codegen_results, outputs)
     }
 
     fn target_features(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> {
@@ -245,13 +275,15 @@ fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> {
     if tcx.sess.target.arch == "x86" || tcx.sess.target.arch == "x86_64" {
         context.add_command_line_option("-masm=intel");
     }
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     {
+        context.set_special_chars_allowed_in_func_names("$.*");
         let version = Version::get();
         let version = format!("{}.{}.{}", version.major, version.minor, version.patch);
-        context.set_output_ident(&format!("rustc version {} with libgccjit {}",
-                rustc_interface::util::rustc_version_str().unwrap_or("unknown version"),
-                version,
+        context.set_output_ident(&format!(
+            "rustc version {} with libgccjit {}",
+            rustc_interface::util::rustc_version_str().unwrap_or("unknown version"),
+            version,
         ));
     }
     // TODO(antoyo): check if this should only be added when using -Cforce-unwind-tables=n.
@@ -260,26 +292,41 @@ fn new_context<'gcc, 'tcx>(tcx: TyCtxt<'tcx>) -> Context<'gcc> {
 }
 
 impl ExtraBackendMethods for GccCodegenBackend {
-    fn codegen_allocator<'tcx>(&self, tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind) -> Self::Module {
+    fn codegen_allocator(
+        &self,
+        tcx: TyCtxt<'_>,
+        module_name: &str,
+        kind: AllocatorKind,
+        alloc_error_handler_kind: AllocatorKind,
+    ) -> Self::Module {
         let mut mods = GccContext {
             context: new_context(tcx),
             should_combine_object_files: false,
             temp_dir: None,
         };
 
-        unsafe { allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind); }
+        unsafe {
+            allocator::codegen(tcx, &mut mods, module_name, kind, alloc_error_handler_kind);
+        }
         mods
     }
 
-    fn compile_codegen_unit(&self, tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<Self::Module>, u64) {
+    fn compile_codegen_unit(
+        &self,
+        tcx: TyCtxt<'_>,
+        cgu_name: Symbol,
+    ) -> (ModuleCodegen<Self::Module>, u64) {
         base::compile_codegen_unit(tcx, cgu_name, self.target_info.clone())
     }
 
-    fn target_machine_factory(&self, _sess: &Session, _opt_level: OptLevel, _features: &[String]) -> TargetMachineFactoryFn<Self> {
+    fn target_machine_factory(
+        &self,
+        _sess: &Session,
+        _opt_level: OptLevel,
+        _features: &[String],
+    ) -> TargetMachineFactoryFn<Self> {
         // TODO(antoyo): set opt level.
-        Arc::new(|_| {
-            Ok(())
-        })
+        Arc::new(|_| Ok(()))
     }
 }
 
@@ -310,11 +357,19 @@ impl WriteBackendMethods for GccCodegenBackend {
     type ThinData = ();
     type ThinBuffer = ThinBuffer;
 
-    fn run_fat_lto(cgcx: &CodegenContext<Self>, modules: Vec<FatLtoInput<Self>>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<LtoModuleCodegen<Self>, FatalError> {
+    fn run_fat_lto(
+        cgcx: &CodegenContext<Self>,
+        modules: Vec<FatLtoInput<Self>>,
+        cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<LtoModuleCodegen<Self>, FatalError> {
         back::lto::run_fat(cgcx, modules, cached_modules)
     }
 
-    fn run_thin_lto(_cgcx: &CodegenContext<Self>, _modules: Vec<(String, Self::ThinBuffer)>, _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
+    fn run_thin_lto(
+        _cgcx: &CodegenContext<Self>,
+        _modules: Vec<(String, Self::ThinBuffer)>,
+        _cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
+    ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
         unimplemented!();
     }
 
@@ -326,21 +381,37 @@ impl WriteBackendMethods for GccCodegenBackend {
         unimplemented!()
     }
 
-    unsafe fn optimize(_cgcx: &CodegenContext<Self>, _dcx: &DiagCtxt, module: &ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<(), FatalError> {
+    unsafe fn optimize(
+        _cgcx: &CodegenContext<Self>,
+        _dcx: &DiagCtxt,
+        module: &ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<(), FatalError> {
         module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level));
         Ok(())
     }
 
-    fn optimize_fat(_cgcx: &CodegenContext<Self>, _module: &mut ModuleCodegen<Self::Module>) -> Result<(), FatalError> {
+    fn optimize_fat(
+        _cgcx: &CodegenContext<Self>,
+        _module: &mut ModuleCodegen<Self::Module>,
+    ) -> Result<(), FatalError> {
         // TODO(antoyo)
         Ok(())
     }
 
-    unsafe fn optimize_thin(_cgcx: &CodegenContext<Self>, _thin: ThinModule<Self>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+    unsafe fn optimize_thin(
+        _cgcx: &CodegenContext<Self>,
+        _thin: ThinModule<Self>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
         unimplemented!();
     }
 
-    unsafe fn codegen(cgcx: &CodegenContext<Self>, dcx: &DiagCtxt, module: ModuleCodegen<Self::Module>, config: &ModuleConfig) -> Result<CompiledModule, FatalError> {
+    unsafe fn codegen(
+        cgcx: &CodegenContext<Self>,
+        dcx: &DiagCtxt,
+        module: ModuleCodegen<Self::Module>,
+        config: &ModuleConfig,
+    ) -> Result<CompiledModule, FatalError> {
         back::write::codegen(cgcx, dcx, module, config)
     }
 
@@ -352,7 +423,11 @@ impl WriteBackendMethods for GccCodegenBackend {
         unimplemented!();
     }
 
-    fn run_link(cgcx: &CodegenContext<Self>, dcx: &DiagCtxt, modules: Vec<ModuleCodegen<Self::Module>>) -> Result<ModuleCodegen<Self::Module>, FatalError> {
+    fn run_link(
+        cgcx: &CodegenContext<Self>,
+        dcx: &DiagCtxt,
+        modules: Vec<ModuleCodegen<Self::Module>>,
+    ) -> Result<ModuleCodegen<Self::Module>, FatalError> {
         back::write::link(cgcx, dcx, modules)
     }
 }
@@ -360,56 +435,57 @@ impl WriteBackendMethods for GccCodegenBackend {
 /// This is the entry point for a hot-plugged rustc_codegen_gccjit
 #[no_mangle]
 pub fn __rustc_codegen_backend() -> Box<dyn CodegenBackend> {
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     let info = {
         // Check whether the target supports 128-bit integers.
         let context = Context::default();
         Arc::new(Mutex::new(IntoDynSyncSend(context.get_target_info())))
     };
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     let info = Arc::new(Mutex::new(IntoDynSyncSend(TargetInfo {
         supports_128bit_integers: AtomicBool::new(false),
     })));
 
-    Box::new(GccCodegenBackend {
-        target_info: LockedTargetInfo { info },
-    })
+    Box::new(GccCodegenBackend { target_info: LockedTargetInfo { info } })
 }
 
 fn to_gcc_opt_level(optlevel: Option<OptLevel>) -> OptimizationLevel {
     match optlevel {
         None => OptimizationLevel::None,
-        Some(level) => {
-            match level {
-                OptLevel::No => OptimizationLevel::None,
-                OptLevel::Less => OptimizationLevel::Limited,
-                OptLevel::Default => OptimizationLevel::Standard,
-                OptLevel::Aggressive => OptimizationLevel::Aggressive,
-                OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
-            }
+        Some(level) => match level {
+            OptLevel::No => OptimizationLevel::None,
+            OptLevel::Less => OptimizationLevel::Limited,
+            OptLevel::Default => OptimizationLevel::Standard,
+            OptLevel::Aggressive => OptimizationLevel::Aggressive,
+            OptLevel::Size | OptLevel::SizeMin => OptimizationLevel::Limited,
         },
     }
 }
 
-pub fn target_features(sess: &Session, allow_unstable: bool, target_info: &LockedTargetInfo) -> Vec<Symbol> {
-    sess
-        .target
+pub fn target_features(
+    sess: &Session,
+    allow_unstable: bool,
+    target_info: &LockedTargetInfo,
+) -> Vec<Symbol> {
+    sess.target
         .supported_target_features()
         .iter()
-        .filter_map(
-            |&(feature, gate)| {
-                if sess.is_nightly_build() || allow_unstable || gate.is_stable() { Some(feature) } else { None }
-            },
-        )
+        .filter_map(|&(feature, gate)| {
+            if sess.is_nightly_build() || allow_unstable || gate.is_stable() {
+                Some(feature)
+            } else {
+                None
+            }
+        })
         .filter(|_feature| {
             target_info.cpu_supports(_feature)
             /*
-               adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512fp16, avx512ifma,
-               avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq,
-               bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
-               sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves
-             */
+              adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512fp16, avx512ifma,
+              avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq,
+              bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm,
+              sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves
+            */
         })
-        .map(|feature| Symbol::intern(feature))
+        .map(Symbol::intern)
         .collect()
 }
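For reference, the 128-bit integer probe that init() performs for non-master
builds (reformatted above) boils down to the following pattern; a minimal
standalone sketch using only the gccjit calls visible in the diff
(Context::default, set_print_errors_to_stderr, new_c_type, compile_to_file,
get_last_error):

    use gccjit::{CType, Context, OutputKind};

    // Returns true when the target libgccjit accepts a __uint128_t.
    fn probe_u128_support(asm_path: &str) -> bool {
        let ctx = Context::default();
        ctx.set_print_errors_to_stderr(false);
        // Creating the C type registers it with the context...
        let _int128 = ctx.new_c_type(CType::UInt128t);
        // ...and emitting assembly forces the error check; a full
        // compile() would require files other than libgccjit.so.
        ctx.compile_to_file(OutputKind::Assembler, asm_path);
        ctx.get_last_error() == Ok(None)
    }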
diff --git a/compiler/rustc_codegen_gcc/src/mono_item.rs b/compiler/rustc_codegen_gcc/src/mono_item.rs
index 3322d56513b..e56c49686c0 100644
--- a/compiler/rustc_codegen_gcc/src/mono_item.rs
+++ b/compiler/rustc_codegen_gcc/src/mono_item.rs
@@ -1,11 +1,11 @@
-#[cfg(feature="master")]
-use gccjit::{VarAttribute, FnAttribute};
+#[cfg(feature = "master")]
+use gccjit::{FnAttribute, VarAttribute};
 use rustc_codegen_ssa::traits::PreDefineMethods;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
 use rustc_middle::mir::mono::{Linkage, Visibility};
-use rustc_middle::ty::{self, Instance, TypeVisitableExt};
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
+use rustc_middle::ty::{self, Instance, TypeVisitableExt};
 
 use crate::attributes;
 use crate::base;
@@ -13,8 +13,14 @@ use crate::context::CodegenCx;
 use crate::type_of::LayoutGccExt;
 
 impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
-    #[cfg_attr(not(feature="master"), allow(unused_variables))]
-    fn predefine_static(&self, def_id: DefId, _linkage: Linkage, visibility: Visibility, symbol_name: &str) {
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))]
+    fn predefine_static(
+        &self,
+        def_id: DefId,
+        _linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
         let attrs = self.tcx.codegen_fn_attrs(def_id);
         let instance = Instance::mono(self.tcx, def_id);
         let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
@@ -22,20 +28,26 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
 
         let is_tls = attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL);
         let global = self.define_global(symbol_name, gcc_type, is_tls, attrs.link_section);
-        #[cfg(feature="master")]
-        global.add_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility)));
+        #[cfg(feature = "master")]
+        global.add_string_attribute(VarAttribute::Visibility(base::visibility_to_gcc(visibility)));
 
         // TODO(antoyo): set linkage.
         self.instances.borrow_mut().insert(instance, global);
     }
 
-    #[cfg_attr(not(feature="master"), allow(unused_variables))]
-    fn predefine_fn(&self, instance: Instance<'tcx>, linkage: Linkage, visibility: Visibility, symbol_name: &str) {
+    #[cfg_attr(not(feature = "master"), allow(unused_variables))]
+    fn predefine_fn(
+        &self,
+        instance: Instance<'tcx>,
+        linkage: Linkage,
+        visibility: Visibility,
+        symbol_name: &str,
+    ) {
         assert!(!instance.args.has_infer());
 
         let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty());
         self.linkage.set(base::linkage_to_gcc(linkage));
-        let decl = self.declare_fn(symbol_name, &fn_abi);
+        let decl = self.declare_fn(symbol_name, fn_abi);
         //let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
 
         attributes::from_fn_attrs(self, decl, instance);
@@ -48,11 +60,10 @@ impl<'gcc, 'tcx> PreDefineMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
             && linkage != Linkage::Private
             && self.tcx.is_compiler_builtins(LOCAL_CRATE)
         {
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             decl.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden));
-        }
-        else {
-            #[cfg(feature="master")]
+        } else {
+            #[cfg(feature = "master")]
             decl.add_attribute(FnAttribute::Visibility(base::visibility_to_gcc(visibility)));
         }
 
diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs
index 7e5aa1c1766..c8e6ae69bd9 100644
--- a/compiler/rustc_codegen_gcc/src/type_.rs
+++ b/compiler/rustc_codegen_gcc/src/type_.rs
@@ -1,8 +1,8 @@
 use gccjit::{RValue, Struct, Type};
-use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods};
 use rustc_codegen_ssa::common::TypeKind;
-use rustc_middle::{bug, ty};
+use rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, TypeMembershipMethods};
 use rustc_middle::ty::layout::TyAndLayout;
+use rustc_middle::{bug, ty};
 use rustc_target::abi::{AddressSpace, Align, Integer, Size};
 
 use crate::common::TypeReflection;
@@ -123,7 +123,7 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn type_f16(&self) -> Type<'gcc> {
         unimplemented!("f16_f128")
     }
-    
+
     fn type_f32(&self) -> Type<'gcc> {
         self.float_type
     }
@@ -136,6 +136,10 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         unimplemented!("f16_f128")
     }
 
+    fn type_f128(&self) -> Type<'gcc> {
+        unimplemented!("f16_f128")
+    }
+
     fn type_func(&self, params: &[Type<'gcc>], return_type: Type<'gcc>) -> Type<'gcc> {
         self.context.new_function_pointer_type(None, return_type, params, false)
     }
@@ -143,14 +147,18 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn type_struct(&self, fields: &[Type<'gcc>], packed: bool) -> Type<'gcc> {
         let types = fields.to_vec();
         if let Some(typ) = self.struct_types.borrow().get(fields) {
-            return typ.clone();
+            return *typ;
         }
-        let fields: Vec<_> = fields.iter().enumerate()
-            .map(|(index, field)| self.context.new_field(None, *field, &format!("field{}_TODO", index)))
+        let fields: Vec<_> = fields
+            .iter()
+            .enumerate()
+            .map(|(index, field)| {
+                self.context.new_field(None, *field, format!("field{}_TODO", index))
+            })
             .collect();
         let typ = self.context.new_struct_type(None, "struct", &fields).as_type();
         if packed {
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             typ.set_packed();
         }
         self.struct_types.borrow_mut().insert(types, typ);
@@ -160,17 +168,13 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn type_kind(&self, typ: Type<'gcc>) -> TypeKind {
         if self.is_int_type_or_bool(typ) {
             TypeKind::Integer
-        }
-        else if typ.is_compatible_with(self.float_type) {
+        } else if typ.is_compatible_with(self.float_type) {
             TypeKind::Float
-        }
-        else if typ.is_compatible_with(self.double_type) {
+        } else if typ.is_compatible_with(self.double_type) {
             TypeKind::Double
-        }
-        else if typ.is_vector() {
+        } else if typ.is_vector() {
             TypeKind::Vector
-        }
-        else {
+        } else {
             // TODO(antoyo): support other types.
             TypeKind::Void
         }
@@ -187,14 +191,11 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
     fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {
         if let Some(typ) = ty.dyncast_array() {
             typ
-        }
-        else if let Some(vector_type) = ty.dyncast_vector() {
+        } else if let Some(vector_type) = ty.dyncast_vector() {
             vector_type.get_element_type()
-        }
-        else if let Some(typ) = ty.get_pointee() {
+        } else if let Some(typ) = ty.get_pointee() {
             typ
-        }
-        else {
+        } else {
             unreachable!()
         }
     }
@@ -208,11 +209,9 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         let f64 = self.context.new_type::<f64>();
         if typ.is_compatible_with(f32) {
             32
-        }
-        else if typ.is_compatible_with(f64) {
+        } else if typ.is_compatible_with(f64) {
             64
-        }
-        else {
+        } else {
             panic!("Cannot get width of float type {:?}", typ);
         }
         // TODO(antoyo): support other sizes.
@@ -226,9 +225,9 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         value.get_type()
     }
 
-    #[cfg_attr(feature="master", allow(unused_mut))]
+    #[cfg_attr(feature = "master", allow(unused_mut))]
     fn type_array(&self, ty: Type<'gcc>, mut len: u64) -> Type<'gcc> {
-        #[cfg(not(feature="master"))]
+        #[cfg(not(feature = "master"))]
         if let Some(struct_type) = ty.is_struct() {
             if struct_type.get_field_count() == 0 {
                // NOTE: since gccjit only supports i32 for the array size and libcore's tests use a
@@ -252,12 +251,14 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     }
 
     pub fn set_struct_body(&self, typ: Struct<'gcc>, fields: &[Type<'gcc>], packed: bool) {
-        let fields: Vec<_> = fields.iter().enumerate()
-            .map(|(index, field)| self.context.new_field(None, *field, &format!("field_{}", index)))
+        let fields: Vec<_> = fields
+            .iter()
+            .enumerate()
+            .map(|(index, field)| self.context.new_field(None, *field, format!("field_{}", index)))
             .collect();
         typ.set_fields(None, &fields);
         if packed {
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             typ.as_type().set_packed();
         }
     }
@@ -267,7 +268,10 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
     }
 }
 
-pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>) -> (Vec<Type<'gcc>>, bool) {
+pub fn struct_fields<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+) -> (Vec<Type<'gcc>>, bool) {
     let field_count = layout.fields.count();
 
     let mut packed = false;
@@ -275,7 +279,7 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
     let mut prev_effective_align = layout.align.abi;
     let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
     for i in layout.fields.index_by_increasing_offset() {
-        let target_offset = layout.fields.offset(i as usize);
+        let target_offset = layout.fields.offset(i);
         let field = layout.field(cx, i);
         let effective_field_align =
             layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
@@ -305,5 +309,4 @@ pub fn struct_fields<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
     (result, packed)
 }
 
-impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
-}
+impl<'gcc, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'gcc, 'tcx> {}
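The type_struct change above keeps the existing memoization: anonymous struct
types are cached by their exact field-type list so repeated requests return
the same gccjit type. A generic sketch of that get-or-create pattern (a plain
HashMap stands in for the RefCell-wrapped cache on CodegenCx; the helper name
is hypothetical):

    use std::collections::HashMap;
    use std::hash::Hash;

    // Look up `key`, or build and cache a new value for it.
    fn get_or_create<K: Eq + Hash, V: Copy>(
        cache: &mut HashMap<K, V>,
        key: K,
        create: impl FnOnce() -> V,
    ) -> V {
        if let Some(&value) = cache.get(&key) {
            return value;
        }
        let value = create();
        cache.insert(key, value);
        value
    }

In the diff the key is the Vec of field types and the cache lives behind a
RefCell so the &self methods can update it.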
diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs
index 5a9212762b7..8f9bfbbd18f 100644
--- a/compiler/rustc_codegen_gcc/src/type_of.rs
+++ b/compiler/rustc_codegen_gcc/src/type_of.rs
@@ -1,13 +1,16 @@
 use std::fmt::Write;
 
-use gccjit::{Struct, Type};
 use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
+use gccjit::{Struct, Type};
 use rustc_middle::bug;
-use rustc_middle::ty::{self, Ty, TypeVisitableExt};
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::with_no_trimmed_paths;
-use rustc_target::abi::{self, Abi, Align, F16, F128, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
+use rustc_middle::ty::{self, Ty, TypeVisitableExt};
 use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
+use rustc_target::abi::{
+    self, Abi, Align, FieldsShape, Int, Integer, PointeeInfo, Pointer, Size, TyAbiInterface,
+    Variants, F128, F16, F32, F64,
+};
 
 use crate::abi::{FnAbiGcc, FnAbiGccExt, GccType};
 use crate::context::CodegenCx;
@@ -25,7 +28,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         }
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn type_int_from_ty(&self, t: ty::IntTy) -> Type<'gcc> {
         match t {
             ty::IntTy::Isize => self.type_isize(),
@@ -37,7 +40,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         }
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn type_uint_from_ty(&self, t: ty::UintTy) -> Type<'gcc> {
         match t {
             ty::UintTy::Usize => self.type_isize(),
@@ -56,7 +59,11 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
     }
 }
 
-fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
+fn uncached_gcc_type<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    layout: TyAndLayout<'tcx>,
+    defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>,
+) -> Type<'gcc> {
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
         Abi::Vector { ref element, count } => {
@@ -70,7 +77,7 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
                     element
                 };
             return cx.context.new_vector_type(element, count);
-        },
+        }
         Abi::ScalarPair(..) => {
             return cx.type_struct(
                 &[
@@ -87,7 +94,12 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
         // FIXME(eddyb) producing readable type names for trait objects can result
         // in problematically distinct types due to HRTB and subtyping (see #47638).
         // ty::Dynamic(..) |
-        ty::Adt(..) | ty::Closure(..) | ty::CoroutineClosure(..) | ty::Foreign(..) | ty::Coroutine(..) | ty::Str
+        ty::Adt(..)
+        | ty::Closure(..)
+        | ty::CoroutineClosure(..)
+        | ty::Foreign(..)
+        | ty::Coroutine(..)
+        | ty::Str
             if !cx.sess().fewer_names() =>
         {
             let mut name = with_no_trimmed_paths!(layout.ty.to_string());
@@ -125,22 +137,21 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout
                     let gcc_type = cx.type_named_struct(name);
                     cx.set_struct_body(gcc_type, &[fill], packed);
                     gcc_type.as_type()
-                },
+                }
             }
         }
         FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).gcc_type(cx), count),
-        FieldsShape::Arbitrary { .. } =>
-            match name {
-                None => {
-                    let (gcc_fields, packed) = struct_fields(cx, layout);
-                    cx.type_struct(&gcc_fields, packed)
-                },
-                Some(ref name) => {
-                    let gcc_type = cx.type_named_struct(name);
-                    *defer = Some((gcc_type, layout));
-                    gcc_type.as_type()
-                },
-            },
+        FieldsShape::Arbitrary { .. } => match name {
+            None => {
+                let (gcc_fields, packed) = struct_fields(cx, layout);
+                cx.type_struct(&gcc_fields, packed)
+            }
+            Some(ref name) => {
+                let gcc_type = cx.type_named_struct(name);
+                *defer = Some((gcc_type, layout));
+                gcc_type.as_type()
+            }
+        },
     }
 }
 
@@ -149,9 +160,22 @@ pub trait LayoutGccExt<'tcx> {
     fn is_gcc_scalar_pair(&self) -> bool;
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
     fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
-    fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
-    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>;
-    fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
+    fn scalar_gcc_type_at<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        scalar: &abi::Scalar,
+        offset: Size,
+    ) -> Type<'gcc>;
+    fn scalar_pair_element_gcc_type<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        index: usize,
+    ) -> Type<'gcc>;
+    fn pointee_info_at<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        offset: Size,
+    ) -> Option<PointeeInfo>;
 }
 
 impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
@@ -191,24 +215,24 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
             if let Some(&ty) = cx.scalar_types.borrow().get(&self.ty) {
                 return ty;
             }
-            let ty =
-                match *self.ty.kind() {
-                    // NOTE: we cannot remove this match like in the LLVM codegen because the call
-                    // to fn_ptr_backend_type handle the on-stack attribute.
-                    // TODO(antoyo): find a less hackish way to hande the on-stack attribute.
-                    ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
-                    _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
-                };
+            let ty = match *self.ty.kind() {
+                // NOTE: we cannot remove this match like in the LLVM codegen because the call
+                // to fn_ptr_backend_type handles the on-stack attribute.
+                // TODO(antoyo): find a less hackish way to handle the on-stack attribute.
+                ty::FnPtr(sig) => {
+                    cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
+                }
+                _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
+            };
             cx.scalar_types.borrow_mut().insert(self.ty, ty);
             return ty;
         }
 
         // Check the cache.
-        let variant_index =
-            match self.variants {
-                Variants::Single { index } => Some(index),
-                _ => None,
-            };
+        let variant_index = match self.variants {
+            Variants::Single { index } => Some(index),
+            _ => None,
+        };
         let cached_type = cx.types.borrow().get(&(self.ty, variant_index)).cloned();
         if let Some(ty) = cached_type {
             return ty;
@@ -221,17 +245,15 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         let normal_ty = cx.tcx.erase_regions(self.ty);
 
         let mut defer = None;
-        let ty =
-            if self.ty != normal_ty {
-                let mut layout = cx.layout_of(normal_ty);
-                if let Some(v) = variant_index {
-                    layout = layout.for_variant(cx, v);
-                }
-                layout.gcc_type(cx)
+        let ty = if self.ty != normal_ty {
+            let mut layout = cx.layout_of(normal_ty);
+            if let Some(v) = variant_index {
+                layout = layout.for_variant(cx, v);
             }
-            else {
-                uncached_gcc_type(cx, *self, &mut defer)
-            };
+            layout.gcc_type(cx)
+        } else {
+            uncached_gcc_type(cx, *self, &mut defer)
+        };
 
         cx.types.borrow_mut().insert((self.ty, variant_index), ty);
 
@@ -252,7 +274,12 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         self.gcc_type(cx)
     }
 
-    fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc> {
+    fn scalar_gcc_type_at<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        scalar: &abi::Scalar,
+        offset: Size,
+    ) -> Type<'gcc> {
         match scalar.primitive() {
             Int(i, true) => cx.type_from_integer(i),
             Int(i, false) => cx.type_from_unsigned_integer(i),
@@ -262,19 +289,21 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
             F128 => cx.type_f128(),
             Pointer(address_space) => {
                 // If we know the alignment, pick something better than i8.
-                let pointee =
-                    if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                        cx.type_pointee_for_align(pointee.align)
-                    }
-                    else {
-                        cx.type_i8()
-                    };
+                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
+                    cx.type_pointee_for_align(pointee.align)
+                } else {
+                    cx.type_i8()
+                };
                 cx.type_ptr_to_ext(pointee, address_space)
             }
         }
     }
 
-    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc> {
+    fn scalar_pair_element_gcc_type<'gcc>(
+        &self,
+        cx: &CodegenCx<'gcc, 'tcx>,
+        index: usize,
+    ) -> Type<'gcc> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
@@ -295,13 +324,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
             return cx.type_i1();
         }
 
-        let offset =
-            if index == 0 {
-                Size::ZERO
-            }
-            else {
-                a.size(cx).align_to(b.align(cx).abi)
-            };
+        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
         self.scalar_gcc_type_at(cx, scalar, offset)
     }
 
@@ -334,7 +357,12 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         layout.is_gcc_scalar_pair()
     }
 
-    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> {
+    fn scalar_pair_element_backend_type(
+        &self,
+        layout: TyAndLayout<'tcx>,
+        index: usize,
+        _immediate: bool,
+    ) -> Type<'gcc> {
         layout.scalar_pair_element_gcc_type(self, index)
     }
 
@@ -352,12 +380,7 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
 
     fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Type<'gcc> {
         // FIXME(antoyo): Should we do something with `FnAbiGcc::fn_attributes`?
-        let FnAbiGcc {
-            return_type,
-            arguments_type,
-            is_c_variadic,
-            ..
-        } = fn_abi.gcc_type(self);
+        let FnAbiGcc { return_type, arguments_type, is_c_variadic, .. } = fn_abi.gcc_type(self);
         self.context.new_function_pointer_type(None, return_type, &arguments_type, is_c_variadic)
     }
 }
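As a worked example of the offset computation in scalar_pair_element_gcc_type
(the one-liner above): for a scalar pair (i8, i32), the second element sits at
size_of(i8) = 1 rounded up to align_of(i32) = 4, i.e. offset 4, while index 0
is always at offset 0. A scalar sketch of that align_to arithmetic (names
illustrative):

    // Mirrors `if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) }`.
    fn pair_offset(index: usize, a_size_bytes: u64, b_align_bytes: u64) -> u64 {
        if index == 0 {
            0
        } else {
            // Round a_size_bytes up to the next multiple of b_align_bytes.
            a_size_bytes.div_ceil(b_align_bytes) * b_align_bytes
        }
    }

    // pair_offset(1, 1, 4) == 4 for an (i8, i32) pair.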