author    Guillaume Gomez <guillaume.gomez@huawei.com>  2024-03-05 19:58:36 +0100
committer Guillaume Gomez <guillaume.gomez@huawei.com>  2024-03-05 19:58:36 +0100
commit    0d359efbe6dfcd927d4fd6208cdaed0bbaf33bb6 (patch)
tree      ff9149569c02c470f2dabf55d66e105d1163ba8f /compiler/rustc_codegen_gcc/src/builder.rs
parent    7606c13961ddc1174b70638e934df0439b7dc515 (diff)
parent    b385428e3ddf330805241e7758e773f933357c4b (diff)
Merge commit 'b385428e3ddf330805241e7758e773f933357c4b' into subtree-update_cg_gcc_2024-03-05
Diffstat (limited to 'compiler/rustc_codegen_gcc/src/builder.rs')
-rw-r--r--  compiler/rustc_codegen_gcc/src/builder.rs  1431
1 file changed, 917 insertions, 514 deletions
diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs
index 71a0a4c2e96..f5cda81f6ab 100644
--- a/compiler/rustc_codegen_gcc/src/builder.rs
+++ b/compiler/rustc_codegen_gcc/src/builder.rs
@@ -4,53 +4,36 @@ use std::convert::TryFrom;
 use std::ops::Deref;
 
 use gccjit::{
-    BinaryOp,
-    Block,
-    ComparisonOp,
-    Context,
-    Function,
-    LValue,
-    RValue,
-    ToRValue,
-    Type,
+    BinaryOp, Block, ComparisonOp, Context, Function, LValue, Location, RValue, ToRValue, Type,
     UnaryOp,
 };
 use rustc_apfloat::{ieee, Float, Round, Status};
-use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::common::{
     AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope, TypeKind,
 };
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::{
-    BackendTypes,
-    BaseTypeMethods,
-    BuilderMethods,
-    ConstMethods,
-    LayoutTypeMethods,
-    HasCodegen,
-    OverflowOp,
-    StaticBuilderMethods,
+    BackendTypes, BaseTypeMethods, BuilderMethods, ConstMethods, HasCodegen, LayoutTypeMethods,
+    OverflowOp, StaticBuilderMethods,
 };
+use rustc_codegen_ssa::MemFlags;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::ty::layout::{
+    FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers,
+    TyAndLayout,
+};
 use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
-use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
-use rustc_span::Span;
 use rustc_span::def_id::DefId;
+use rustc_span::Span;
 use rustc_target::abi::{
-    self,
-    call::FnAbi,
-    Align,
-    HasDataLayout,
-    Size,
-    TargetDataLayout,
-    WrappingRange,
+    self, call::FnAbi, Align, HasDataLayout, Size, TargetDataLayout, WrappingRange,
 };
 use rustc_target::spec::{HasTargetSpec, Target};
 
-use crate::common::{SignType, TypeReflection, type_is_pointer};
+use crate::common::{type_is_pointer, SignType, TypeReflection};
 use crate::context::CodegenCx;
 use crate::intrinsic::llvm;
 use crate::type_of::LayoutGccExt;
@@ -70,54 +53,74 @@ pub struct Builder<'a: 'gcc, 'gcc, 'tcx> {
     pub cx: &'a CodegenCx<'gcc, 'tcx>,
     pub block: Block<'gcc>,
     stack_var_count: Cell<usize>,
+    pub location: Option<Location<'gcc>>,
 }
 
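The central change in this diff is the new `location: Option<Location<'gcc>>` field: every emit helper below now forwards `self.location` where it previously passed a hard-coded `None`, so the generated code can carry debug locations. A minimal standalone sketch of the pattern, with hypothetical types that are not part of the commit:

    #[derive(Clone, Copy)]
    struct Location { line: u32, column: u32 }

    struct Builder {
        location: Option<Location>,
    }

    impl Builder {
        // Every emit helper forwards the builder's current location
        // instead of hard-coding `None`.
        fn emit(&self, op: &str) {
            match self.location {
                Some(loc) => println!("{op} at {}:{}", loc.line, loc.column),
                None => println!("{op} (no debug location)"),
            }
        }
    }

    fn main() {
        let mut bx = Builder { location: None };
        bx.emit("add"); // no location yet
        bx.location = Some(Location { line: 7, column: 3 });
        bx.emit("mul"); // tagged with 7:3
    }
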
 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     fn with_cx(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Self {
-        Builder {
-            cx,
-            block,
-            stack_var_count: Cell::new(0),
-        }
+        Builder { cx, block, stack_var_count: Cell::new(0), location: None }
     }
 
-    fn atomic_extremum(&mut self, operation: ExtremumOperation, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+    fn atomic_extremum(
+        &mut self,
+        operation: ExtremumOperation,
+        dst: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+    ) -> RValue<'gcc> {
         let size = src.get_type().get_size();
 
         let func = self.current_func();
 
-        let load_ordering =
-            match order {
-                // TODO(antoyo): does this make sense?
-                AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
-                _ => order,
-            };
-        let previous_value = self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
-        let previous_var = func.new_local(None, previous_value.get_type(), "previous_value");
-        let return_value = func.new_local(None, previous_value.get_type(), "return_value");
-        self.llbb().add_assignment(None, previous_var, previous_value);
-        self.llbb().add_assignment(None, return_value, previous_var.to_rvalue());
+        let load_ordering = match order {
+            // TODO(antoyo): does this make sense?
+            AtomicOrdering::AcquireRelease | AtomicOrdering::Release => AtomicOrdering::Acquire,
+            _ => order,
+        };
+        let previous_value =
+            self.atomic_load(dst.get_type(), dst, load_ordering, Size::from_bytes(size));
+        let previous_var =
+            func.new_local(self.location, previous_value.get_type(), "previous_value");
+        let return_value = func.new_local(self.location, previous_value.get_type(), "return_value");
+        self.llbb().add_assignment(self.location, previous_var, previous_value);
+        self.llbb().add_assignment(self.location, return_value, previous_var.to_rvalue());
 
         let while_block = func.new_block("while");
         let after_block = func.new_block("after_while");
-        self.llbb().end_with_jump(None, while_block);
+        self.llbb().end_with_jump(self.location, while_block);
 
         // NOTE: since jumps were added and compare_exchange doesn't expect this, the current block in the
         // state needs to be updated.
         self.switch_to_block(while_block);
 
-        let comparison_operator =
-            match operation {
-                ExtremumOperation::Max => ComparisonOp::LessThan,
-                ExtremumOperation::Min => ComparisonOp::GreaterThan,
-            };
-
-        let cond1 = self.context.new_comparison(None, comparison_operator, previous_var.to_rvalue(), self.context.new_cast(None, src, previous_value.get_type()));
-        let compare_exchange = self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
-        let cond2 = self.cx.context.new_unary_op(None, UnaryOp::LogicalNegate, compare_exchange.get_type(), compare_exchange);
-        let cond = self.cx.context.new_binary_op(None, BinaryOp::LogicalAnd, self.cx.bool_type, cond1, cond2);
+        let comparison_operator = match operation {
+            ExtremumOperation::Max => ComparisonOp::LessThan,
+            ExtremumOperation::Min => ComparisonOp::GreaterThan,
+        };
 
-        while_block.end_with_conditional(None, cond, while_block, after_block);
+        let cond1 = self.context.new_comparison(
+            self.location,
+            comparison_operator,
+            previous_var.to_rvalue(),
+            self.context.new_cast(self.location, src, previous_value.get_type()),
+        );
+        let compare_exchange =
+            self.compare_exchange(dst, previous_var, src, order, load_ordering, false);
+        let cond2 = self.cx.context.new_unary_op(
+            self.location,
+            UnaryOp::LogicalNegate,
+            compare_exchange.get_type(),
+            compare_exchange,
+        );
+        let cond = self.cx.context.new_binary_op(
+            self.location,
+            BinaryOp::LogicalAnd,
+            self.cx.bool_type,
+            cond1,
+            cond2,
+        );
+
+        while_block.end_with_conditional(self.location, cond, while_block, after_block);
 
         // NOTE: since jumps were added in a place rustc does not expect, the current block in the
         // state needs to be updated.
@@ -126,29 +129,48 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         return_value.to_rvalue()
     }
 
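`atomic_extremum` lowers atomic max/min as a compare-exchange loop: load the current value, then keep retrying the CAS while the comparison still favors the new value. A behaviorally similar sketch in plain Rust, assuming unsigned 32-bit operands (an illustration, not the emitted code; std's `AtomicU32::fetch_max` provides the same operation, the loop is spelled out to mirror the lowering):

    use std::sync::atomic::{AtomicU32, Ordering};

    // Atomic max: returns the value observed before the update.
    fn atomic_max(dst: &AtomicU32, src: u32) -> u32 {
        let mut previous = dst.load(Ordering::Acquire);
        // Retry while `previous < src` and the CAS keeps failing,
        // mirroring the `cond1 && cond2` loop condition above.
        while previous < src {
            match dst.compare_exchange(previous, src, Ordering::AcqRel, Ordering::Acquire) {
                Ok(_) => break,
                Err(actual) => previous = actual,
            }
        }
        previous
    }

    fn main() {
        let cell = AtomicU32::new(3);
        assert_eq!(atomic_max(&cell, 10), 3);
        assert_eq!(cell.load(Ordering::Relaxed), 10);
    }
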
-    fn compare_exchange(&self, dst: RValue<'gcc>, cmp: LValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> RValue<'gcc> {
+    fn compare_exchange(
+        &self,
+        dst: RValue<'gcc>,
+        cmp: LValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
+        weak: bool,
+    ) -> RValue<'gcc> {
         let size = src.get_type().get_size();
-        let compare_exchange = self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
+        let compare_exchange =
+            self.context.get_builtin_function(&format!("__atomic_compare_exchange_{}", size));
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
         let failure_order = self.context.new_rvalue_from_int(self.i32_type, failure_order.to_gcc());
         let weak = self.context.new_rvalue_from_int(self.bool_type, weak as i32);
 
         let void_ptr_type = self.context.new_type::<*mut ()>();
         let volatile_void_ptr_type = void_ptr_type.make_volatile();
-        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
-        let expected = self.context.new_cast(None, cmp.get_address(None), void_ptr_type);
+        let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
+        let expected =
+            self.context.new_cast(self.location, cmp.get_address(self.location), void_ptr_type);
 
         // NOTE: not sure why, but we have the wrong type here.
         let int_type = compare_exchange.get_param(2).to_rvalue().get_type();
-        let src = self.context.new_cast(None, src, int_type);
-        self.context.new_call(None, compare_exchange, &[dst, expected, src, weak, order, failure_order])
+        let src = self.context.new_cast(self.location, src, int_type);
+        self.context.new_call(
+            self.location,
+            compare_exchange,
+            &[dst, expected, src, weak, order, failure_order],
+        )
     }
 
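`__atomic_compare_exchange_N` takes separate success and failure orderings, like Rust's `compare_exchange`. The failure ordering applies only to the load that happens when the exchange does not take place, so it cannot include a release component, which is why `atomic_extremum` above maps Release/AcqRel down to Acquire for its load side. A sketch of the mapping (illustrative only):

    use std::sync::atomic::{AtomicU32, Ordering};

    fn cas(dst: &AtomicU32, expected: u32, new: u32) -> Result<u32, u32> {
        // Success ordering AcqRel, failure ordering Acquire: on failure no
        // store happens, so only a load-side ordering is meaningful.
        dst.compare_exchange(expected, new, Ordering::AcqRel, Ordering::Acquire)
    }

    fn main() {
        let cell = AtomicU32::new(1);
        assert_eq!(cas(&cell, 1, 2), Ok(1));
        assert_eq!(cas(&cell, 1, 3), Err(2));
    }
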
     pub fn assign(&self, lvalue: LValue<'gcc>, value: RValue<'gcc>) {
-        self.llbb().add_assignment(None, lvalue, value);
+        self.llbb().add_assignment(self.location, lvalue, value);
     }
 
-    fn check_call<'b>(&mut self, _typ: &str, func: Function<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+    fn check_call<'b>(
+        &mut self,
+        _typ: &str,
+        func: Function<'gcc>,
+        args: &'b [RValue<'gcc>],
+    ) -> Cow<'b, [RValue<'gcc>]> {
         let mut all_args_match = true;
         let mut param_types = vec![];
         let param_count = func.get_param_count();
@@ -173,8 +195,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 let actual_ty = actual_val.get_type();
                 if expected_ty != actual_ty {
                     self.bitcast(actual_val, expected_ty)
-                }
-                else {
+                } else {
                     actual_val
                 }
             })
@@ -185,7 +206,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         Cow::Owned(casted_args)
     }
 
-    fn check_ptr_call<'b>(&mut self, _typ: &str, func_ptr: RValue<'gcc>, args: &'b [RValue<'gcc>]) -> Cow<'b, [RValue<'gcc>]> {
+    fn check_ptr_call<'b>(
+        &mut self,
+        _typ: &str,
+        func_ptr: RValue<'gcc>,
+        args: &'b [RValue<'gcc>],
+    ) -> Cow<'b, [RValue<'gcc>]> {
         let mut all_args_match = true;
         let mut param_types = vec![];
         let gcc_func = func_ptr.get_type().dyncast_function_ptr_type().expect("function ptr");
@@ -219,20 +245,32 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
                 let actual_ty = actual_val.get_type();
                 if expected_ty != actual_ty {
-                    if !actual_ty.is_vector() && !expected_ty.is_vector() && (actual_ty.is_integral() && expected_ty.is_integral()) || (actual_ty.get_pointee().is_some() && expected_ty.get_pointee().is_some()) {
-                        self.context.new_cast(None, actual_val, expected_ty)
-                    }
-                    else if on_stack_param_indices.contains(&index) {
-                        actual_val.dereference(None).to_rvalue()
-                    }
-                    else {
-                        assert!(!((actual_ty.is_vector() && !expected_ty.is_vector()) || (!actual_ty.is_vector() && expected_ty.is_vector())), "{:?} ({}) -> {:?} ({}), index: {:?}[{}]", actual_ty, actual_ty.is_vector(), expected_ty, expected_ty.is_vector(), func_ptr, index);
+                    if !actual_ty.is_vector()
+                        && !expected_ty.is_vector()
+                        && (actual_ty.is_integral() && expected_ty.is_integral())
+                        || (actual_ty.get_pointee().is_some()
+                            && expected_ty.get_pointee().is_some())
+                    {
+                        self.context.new_cast(self.location, actual_val, expected_ty)
+                    } else if on_stack_param_indices.contains(&index) {
+                        actual_val.dereference(self.location).to_rvalue()
+                    } else {
+                        assert!(
+                            !((actual_ty.is_vector() && !expected_ty.is_vector())
+                                || (!actual_ty.is_vector() && expected_ty.is_vector())),
+                            "{:?} ({}) -> {:?} ({}), index: {:?}[{}]",
+                            actual_ty,
+                            actual_ty.is_vector(),
+                            expected_ty,
+                            expected_ty.is_vector(),
+                            func_ptr,
+                            index
+                        );
                         // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
                         // TODO: remove bitcast now that vector types can be compared?
                         self.bitcast(actual_val, expected_ty)
                     }
-                }
-                else {
+                } else {
                     actual_val
                 }
             })
@@ -256,7 +294,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.block.get_function()
     }
 
-    fn function_call(&mut self, func: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    fn function_call(
+        &mut self,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         // TODO(antoyo): remove when the API supports a different type for functions.
         let func: Function<'gcc> = self.cx.rvalue_as_function(func);
         let args = self.check_call("call", func, args);
@@ -268,35 +311,54 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let current_func = self.block.get_function();
         if return_type != void_type {
             unsafe { RETURN_VALUE_COUNT += 1 };
-            let result = current_func.new_local(None, return_type, &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }));
-            self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+            let result = current_func.new_local(
+                self.location,
+                return_type,
+                &format!("returnValue{}", unsafe { RETURN_VALUE_COUNT }),
+            );
+            self.block.add_assignment(
+                self.location,
+                result,
+                self.cx.context.new_call(self.location, func, &args),
+            );
             result.to_rvalue()
-        }
-        else {
-            self.block.add_eval(None, self.cx.context.new_call(None, func, &args));
+        } else {
+            self.block
+                .add_eval(self.location, self.cx.context.new_call(self.location, func, &args));
             // Return a dummy value when there is no return value.
             self.context.new_rvalue_from_long(self.isize_type, 0)
         }
     }
 
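gccjit makes every call result land in a named local, and the code above numbers those locals with a `static mut RETURN_VALUE_COUNT` bumped in `unsafe` blocks. A per-builder counter would yield the same unique names without `static mut`; a sketch of that alternative (hypothetical, not what the commit does):

    use std::cell::Cell;

    struct NameGen { count: Cell<usize> }

    impl NameGen {
        // Interior mutability through Cell keeps `next` callable on `&self`.
        fn next(&self, prefix: &str) -> String {
            self.count.set(self.count.get() + 1);
            format!("{prefix}{}", self.count.get())
        }
    }

    fn main() {
        let names = NameGen { count: Cell::new(0) };
        assert_eq!(names.next("returnValue"), "returnValue1");
        assert_eq!(names.next("returnValue"), "returnValue2");
    }
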
-    fn function_ptr_call(&mut self, typ: Type<'gcc>, mut func_ptr: RValue<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
-        let gcc_func =
-            match func_ptr.get_type().dyncast_function_ptr_type() {
-                Some(func) => func,
-                None => {
-                    // NOTE: due to opaque pointers now being used, we need to cast here.
-                    let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr");
-                    func_ptr = self.context.new_cast(None, func_ptr, typ);
-                    new_func_type
-                },
-            };
+    fn function_ptr_call(
+        &mut self,
+        typ: Type<'gcc>,
+        mut func_ptr: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
+        let gcc_func = match func_ptr.get_type().dyncast_function_ptr_type() {
+            Some(func) => func,
+            None => {
+                // NOTE: due to opaque pointers now being used, we need to cast here.
+                let new_func_type = typ.dyncast_function_ptr_type().expect("function ptr");
+                func_ptr = self.context.new_cast(self.location, func_ptr, typ);
+                new_func_type
+            }
+        };
         let func_name = format!("{:?}", func_ptr);
         let previous_arg_count = args.len();
         let orig_args = args;
         let args = {
             let function_address_names = self.function_address_names.borrow();
             let original_function_name = function_address_names.get(&func_ptr);
-            llvm::adjust_intrinsic_arguments(&self, gcc_func, args.into(), &func_name, original_function_name)
+            llvm::adjust_intrinsic_arguments(
+                &self,
+                gcc_func,
+                args.into(),
+                &func_name,
+                original_function_name,
+            )
         };
         let args_adjusted = args.len() != previous_arg_count;
         let args = self.check_ptr_call("call", func_ptr, &*args);
@@ -309,39 +371,78 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         if return_type != void_type {
             unsafe { RETURN_VALUE_COUNT += 1 };
-            let return_value = self.cx.context.new_call_through_ptr(None, func_ptr, &args);
-            let return_value = llvm::adjust_intrinsic_return_value(&self, return_value, &func_name, &args, args_adjusted, orig_args);
-            let result = current_func.new_local(None, return_value.get_type(), &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
-            self.block.add_assignment(None, result, return_value);
+            let return_value = self.cx.context.new_call_through_ptr(self.location, func_ptr, &args);
+            let return_value = llvm::adjust_intrinsic_return_value(
+                &self,
+                return_value,
+                &func_name,
+                &args,
+                args_adjusted,
+                orig_args,
+            );
+            let result = current_func.new_local(
+                self.location,
+                return_value.get_type(),
+                &format!("ptrReturnValue{}", unsafe { RETURN_VALUE_COUNT }),
+            );
+            self.block.add_assignment(self.location, result, return_value);
             result.to_rvalue()
-        }
-        else {
-            #[cfg(not(feature="master"))]
+        } else {
+            #[cfg(not(feature = "master"))]
             if gcc_func.get_param_count() == 0 {
                 // FIXME(antoyo): As a temporary workaround for unsupported LLVM intrinsics.
-                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &[]));
-            }
-            else {
-                self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+                self.block.add_eval(
+                    self.location,
+                    self.cx.context.new_call_through_ptr(self.location, func_ptr, &[]),
+                );
+            } else {
+                self.block.add_eval(
+                    self.location,
+                    self.cx.context.new_call_through_ptr(self.location, func_ptr, &args),
+                );
             }
-            #[cfg(feature="master")]
-            self.block.add_eval(None, self.cx.context.new_call_through_ptr(None, func_ptr, &args));
+            #[cfg(feature = "master")]
+            self.block.add_eval(
+                self.location,
+                self.cx.context.new_call_through_ptr(self.location, func_ptr, &args),
+            );
             // Return a dummy value when there is no return value.
-            let result = current_func.new_local(None, self.isize_type, "dummyValueThatShouldNeverBeUsed");
-            self.block.add_assignment(None, result, self.context.new_rvalue_from_long(self.isize_type, 0));
+            let result = current_func.new_local(
+                self.location,
+                self.isize_type,
+                "dummyValueThatShouldNeverBeUsed",
+            );
+            self.block.add_assignment(
+                self.location,
+                result,
+                self.context.new_rvalue_from_long(self.isize_type, 0),
+            );
             result.to_rvalue()
         }
     }
 
-    pub fn overflow_call(&self, func: Function<'gcc>, args: &[RValue<'gcc>], _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    pub fn overflow_call(
+        &self,
+        func: Function<'gcc>,
+        args: &[RValue<'gcc>],
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         // gccjit requires the result of a function call to be used, even when it's not needed.
         // That's why we assign the result to a local.
         let return_type = self.context.new_type::<bool>();
         let current_func = self.block.get_function();
         // TODO(antoyo): return the new_call() directly? Since the overflow function has no side-effects.
         unsafe { RETURN_VALUE_COUNT += 1 };
-        let result = current_func.new_local(None, return_type, &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }));
-        self.block.add_assignment(None, result, self.cx.context.new_call(None, func, &args));
+        let result = current_func.new_local(
+            self.location,
+            return_type,
+            &format!("overflowReturnValue{}", unsafe { RETURN_VALUE_COUNT }),
+        );
+        self.block.add_assignment(
+            self.location,
+            result,
+            self.cx.context.new_call(self.location, func, &args),
+        );
         result.to_rvalue()
     }
 }
@@ -405,6 +506,17 @@ impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
     type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
 }
 
+fn set_rvalue_location<'a, 'gcc, 'tcx>(
+    bx: &mut Builder<'a, 'gcc, 'tcx>,
+    rvalue: RValue<'gcc>,
+) -> RValue<'gcc> {
+    if bx.location.is_some() {
+        #[cfg(feature = "master")]
+        rvalue.set_location(bx.location.unwrap());
+    }
+    rvalue
+}
+
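Note the shape of the new `set_rvalue_location` helper: the `#[cfg(feature = "master")]` attribute gates a single statement, so with the feature off the function compiles down to the identity and the `is_some()` check is dead. A standalone sketch of the same pattern, with a hypothetical function:

    fn tag(value: i32, location: Option<u32>) -> i32 {
        if location.is_some() {
            // With the feature disabled, this statement disappears and the
            // function just returns `value` unchanged.
            #[cfg(feature = "master")]
            println!("tagging at {}", location.unwrap());
        }
        value
    }

    fn main() {
        assert_eq!(tag(5, Some(7)), 5);
    }
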
 impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> {
         Builder::with_cx(cx, block)
@@ -429,43 +541,58 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn ret_void(&mut self) {
-        self.llbb().end_with_void_return(None)
+        self.llbb().end_with_void_return(self.location)
     }
 
     fn ret(&mut self, mut value: RValue<'gcc>) {
         if self.structs_as_pointer.borrow().contains(&value) {
             // NOTE: hack to workaround a limitation of the rustc API: see comment on
             // CodegenCx.structs_as_pointer
-            value = value.dereference(None).to_rvalue();
+            value = value.dereference(self.location).to_rvalue();
         }
         let expected_return_type = self.current_func().get_return_type();
         if !expected_return_type.is_compatible_with(value.get_type()) {
             // NOTE: due to opaque pointers now being used, we need to cast here.
-            value = self.context.new_cast(None, value, expected_return_type);
+            value = self.context.new_cast(self.location, value, expected_return_type);
         }
-        self.llbb().end_with_return(None, value);
+        self.llbb().end_with_return(self.location, value);
     }
 
     fn br(&mut self, dest: Block<'gcc>) {
-        self.llbb().end_with_jump(None, dest)
+        self.llbb().end_with_jump(self.location, dest)
     }
 
     fn cond_br(&mut self, cond: RValue<'gcc>, then_block: Block<'gcc>, else_block: Block<'gcc>) {
-        self.llbb().end_with_conditional(None, cond, then_block, else_block)
+        self.llbb().end_with_conditional(self.location, cond, then_block, else_block)
     }
 
-    fn switch(&mut self, value: RValue<'gcc>, default_block: Block<'gcc>, cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>) {
+    fn switch(
+        &mut self,
+        value: RValue<'gcc>,
+        default_block: Block<'gcc>,
+        cases: impl ExactSizeIterator<Item = (u128, Block<'gcc>)>,
+    ) {
         let mut gcc_cases = vec![];
         let typ = self.val_ty(value);
         for (on_val, dest) in cases {
             let on_val = self.const_uint_big(typ, on_val);
             gcc_cases.push(self.context.new_case(on_val, on_val, dest));
         }
-        self.block.end_with_switch(None, value, default_block, &gcc_cases);
+        self.block.end_with_switch(self.location, value, default_block, &gcc_cases);
     }
 
-    #[cfg(feature="master")]
-    fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: Option<&CodegenFnAttrs>, _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    #[cfg(feature = "master")]
+    fn invoke(
+        &mut self,
+        typ: Type<'gcc>,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        _fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        then: Block<'gcc>,
+        catch: Block<'gcc>,
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         let try_block = self.current_func().new_block("try");
 
         let current_block = self.block.clone();
@@ -473,30 +600,39 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let call = self.call(typ, fn_attrs, None, func, args, None); // TODO(antoyo): use funclet here?
         self.block = current_block;
 
-        let return_value = self.current_func()
-            .new_local(None, call.get_type(), "invokeResult");
+        let return_value =
+            self.current_func().new_local(self.location, call.get_type(), "invokeResult");
 
-        try_block.add_assignment(None, return_value, call);
+        try_block.add_assignment(self.location, return_value, call);
 
-        try_block.end_with_jump(None, then);
+        try_block.end_with_jump(self.location, then);
 
         if self.cleanup_blocks.borrow().contains(&catch) {
-            self.block.add_try_finally(None, try_block, catch);
-        }
-        else {
-            self.block.add_try_catch(None, try_block, catch);
+            self.block.add_try_finally(self.location, try_block, catch);
+        } else {
+            self.block.add_try_catch(self.location, try_block, catch);
         }
 
-        self.block.end_with_jump(None, then);
+        self.block.end_with_jump(self.location, then);
 
         return_value.to_rvalue()
     }
 
-    #[cfg(not(feature="master"))]
-    fn invoke(&mut self, typ: Type<'gcc>, fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, func: RValue<'gcc>, args: &[RValue<'gcc>], then: Block<'gcc>, catch: Block<'gcc>, _funclet: Option<&Funclet>) -> RValue<'gcc> {
+    #[cfg(not(feature = "master"))]
+    fn invoke(
+        &mut self,
+        typ: Type<'gcc>,
+        fn_attrs: Option<&CodegenFnAttrs>,
+        fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
+        func: RValue<'gcc>,
+        args: &[RValue<'gcc>],
+        then: Block<'gcc>,
+        catch: Block<'gcc>,
+        _funclet: Option<&Funclet>,
+    ) -> RValue<'gcc> {
         let call_site = self.call(typ, fn_attrs, None, func, args, None);
         let condition = self.context.new_rvalue_from_int(self.bool_type, 1);
-        self.llbb().end_with_conditional(None, condition, then, catch);
+        self.llbb().end_with_conditional(self.location, condition, then, catch);
         if let Some(_fn_abi) = fn_abi {
             // TODO(bjorn3): Apply function attributes
         }
@@ -505,16 +641,15 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
     fn unreachable(&mut self) {
         let func = self.context.get_builtin_function("__builtin_unreachable");
-        self.block.add_eval(None, self.context.new_call(None, func, &[]));
+        self.block.add_eval(self.location, self.context.new_call(self.location, func, &[]));
         let return_type = self.block.get_function().get_return_type();
         let void_type = self.context.new_type::<()>();
         if return_type == void_type {
-            self.block.end_with_void_return(None)
-        }
-        else {
-            let return_value = self.current_func()
-                .new_local(None, return_type, "unreachableReturn");
-            self.block.end_with_return(None, return_value)
+            self.block.end_with_void_return(self.location)
+        } else {
+            let return_value =
+                self.current_func().new_local(self.location, return_type, "unreachableReturn");
+            self.block.end_with_return(self.location, return_value)
         }
     }
 
@@ -539,7 +674,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn fmul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a * b
+        self.cx.context.new_binary_op(self.location, BinaryOp::Mult, a.get_type(), a, b)
     }
 
     fn udiv(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -564,7 +699,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         // FIXME(antoyo): rustc_codegen_ssa::mir::intrinsic uses different types for a and b but they
         // should be the same.
         let typ = a.get_type().to_signed(self);
-        let b = self.context.new_cast(None, b, typ);
+        let b = self.context.new_cast(self.location, b, typ);
         a / b
     }
 
@@ -606,15 +741,32 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         //     ../../../gcc/gcc/cfgexpand.cc:6069
         // 0x7f0101bf9194 execute
         //     ../../../gcc/gcc/cfgexpand.cc:6795
-        if a.get_type().is_compatible_with(self.cx.float_type) {
+        let a_type = a.get_type();
+        let a_type_unqualified = a_type.unqualified();
+        if a_type.is_compatible_with(self.cx.float_type) {
             let fmodf = self.context.get_builtin_function("fmodf");
             // FIXME(antoyo): this seems to produce the wrong result.
-            return self.context.new_call(None, fmodf, &[a, b]);
+            return self.context.new_call(self.location, fmodf, &[a, b]);
+        }
+        if let Some(vector_type) = a_type_unqualified.dyncast_vector() {
+            assert_eq!(a_type_unqualified, b.get_type().unqualified());
+
+            let num_units = vector_type.get_num_units();
+            let new_elements: Vec<_> = (0..num_units)
+                .map(|i| {
+                    let index = self.context.new_rvalue_from_long(self.cx.type_u32(), i as _);
+                    let x = self.extract_element(a, index).to_rvalue();
+                    let y = self.extract_element(b, index).to_rvalue();
+                    self.frem(x, y)
+                })
+                .collect();
+
+            return self.context.new_rvalue_from_vector(self.location, a_type, &new_elements);
         }
-        assert_eq!(a.get_type().unqualified(), self.cx.double_type);
+        assert_eq!(a_type_unqualified, self.cx.double_type);
 
         let fmod = self.context.get_builtin_function("fmod");
-        return self.context.new_call(None, fmod, &[a, b]);
+        self.context.new_call(self.location, fmod, &[a, b])
     }
 
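The new branch lowers `frem` on vector types element by element with the scalar remainder. In plain Rust the same element-wise lowering looks like this, with slices standing in for GCC vector values:

    fn frem_vector(a: &[f64], b: &[f64]) -> Vec<f64> {
        assert_eq!(a.len(), b.len());
        // Rust's `%` on floats is fmod-like: the result keeps the sign of
        // the dividend, matching the scalar fmod/fmodf path above.
        a.iter().zip(b).map(|(x, y)| x % y).collect()
    }

    fn main() {
        assert_eq!(frem_vector(&[7.5, -7.5], &[2.0, 2.0]), vec![1.5, -1.5]);
    }
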
     fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -636,73 +788,78 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.gcc_or(a, b)
+        self.cx.gcc_or(a, b, self.location)
     }
 
     fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_xor(a, b)
+        set_rvalue_location(self, self.gcc_xor(a, b))
     }
 
     fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_neg(a)
+        set_rvalue_location(self, self.gcc_neg(a))
     }
 
     fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+        set_rvalue_location(
+            self,
+            self.cx.context.new_unary_op(self.location, UnaryOp::Minus, a.get_type(), a),
+        )
     }
 
     fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_not(a)
+        set_rvalue_location(self, self.gcc_not(a))
     }
 
     fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_add(a, b)
+        set_rvalue_location(self, self.gcc_add(a, b))
     }
 
     fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_add(a, b)
+        set_rvalue_location(self, self.gcc_add(a, b))
     }
 
     fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_sub(a, b)
+        set_rvalue_location(self, self.gcc_sub(a, b))
     }
 
     fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): should generate poison value?
-        self.gcc_sub(a, b)
+        set_rvalue_location(self, self.gcc_sub(a, b))
     }
 
     fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_mul(a, b)
+        set_rvalue_location(self, self.gcc_mul(a, b))
     }
 
     fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_mul(a, b)
+        set_rvalue_location(self, self.gcc_mul(a, b))
     }
 
     fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs + rhs
+        set_rvalue_location(self, lhs + rhs)
     }
 
     fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs - rhs
+        set_rvalue_location(self, lhs - rhs)
     }
 
     fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs * rhs
+        set_rvalue_location(self, lhs * rhs)
     }
 
     fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs / rhs
+        set_rvalue_location(self, lhs / rhs)
     }
 
     fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        self.frem(lhs, rhs)
+        let result = self.frem(lhs, rhs);
+        set_rvalue_location(self, result);
+        result
     }
 
     fn fadd_algebraic(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
@@ -730,23 +887,33 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.frem(lhs, rhs)
     }
 
-    fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
+    fn checked_binop(
+        &mut self,
+        oop: OverflowOp,
+        typ: Ty<'_>,
+        lhs: Self::Value,
+        rhs: Self::Value,
+    ) -> (Self::Value, Self::Value) {
         self.gcc_checked_binop(oop, typ, lhs, rhs)
     }
 
     fn alloca(&mut self, ty: Type<'gcc>, align: Align) -> RValue<'gcc> {
         // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
         // Ideally, we shouldn't need to do this check.
-        let aligned_type =
-            if ty == self.cx.u128_type || ty == self.cx.i128_type {
-                ty
-            }
-            else {
-                ty.get_aligned(align.bytes())
-            };
+        let aligned_type = if ty == self.cx.u128_type || ty == self.cx.i128_type {
+            ty
+        } else {
+            ty.get_aligned(align.bytes())
+        };
         // TODO(antoyo): It might be better to return an LValue, but fixing the rustc API is non-trivial.
         self.stack_var_count.set(self.stack_var_count.get() + 1);
-        self.current_func().new_local(None, aligned_type, &format!("stack_var_{}", self.stack_var_count.get())).get_address(None)
+        self.current_func()
+            .new_local(
+                self.location,
+                aligned_type,
+                &format!("stack_var_{}", self.stack_var_count.get()),
+            )
+            .get_address(self.location)
     }
 
     fn byte_array_alloca(&mut self, _len: RValue<'gcc>, _align: Align) -> RValue<'gcc> {
@@ -761,48 +928,62 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         // dereference after a drop, for instance.
         // FIXME(antoyo): this checks that we don't call get_aligned() a second time on a type.
         // Ideally, we shouldn't need to do this check.
-        let aligned_type =
-            if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type {
-                pointee_ty
-            }
-            else {
-                pointee_ty.get_aligned(align.bytes())
-            };
-        let ptr = self.context.new_cast(None, ptr, aligned_type.make_pointer());
-        let deref = ptr.dereference(None).to_rvalue();
+        let aligned_type = if pointee_ty == self.cx.u128_type || pointee_ty == self.cx.i128_type {
+            pointee_ty
+        } else {
+            pointee_ty.get_aligned(align.bytes())
+        };
+        let ptr = self.context.new_cast(self.location, ptr, aligned_type.make_pointer());
+        let deref = ptr.dereference(self.location).to_rvalue();
         unsafe { RETURN_VALUE_COUNT += 1 };
-        let loaded_value = function.new_local(None, aligned_type, &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }));
-        block.add_assignment(None, loaded_value, deref);
+        let loaded_value = function.new_local(
+            self.location,
+            aligned_type,
+            &format!("loadedValue{}", unsafe { RETURN_VALUE_COUNT }),
+        );
+        block.add_assignment(self.location, loaded_value, deref);
         loaded_value.to_rvalue()
     }
 
     fn volatile_load(&mut self, ty: Type<'gcc>, ptr: RValue<'gcc>) -> RValue<'gcc> {
-        let ptr = self.context.new_cast(None, ptr, ty.make_volatile().make_pointer());
-        ptr.dereference(None).to_rvalue()
+        let ptr = self.context.new_cast(self.location, ptr, ty.make_volatile().make_pointer());
+        ptr.dereference(self.location).to_rvalue()
     }
 
-    fn atomic_load(&mut self, _ty: Type<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) -> RValue<'gcc> {
+    fn atomic_load(
+        &mut self,
+        _ty: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> RValue<'gcc> {
         // TODO(antoyo): use ty.
         // TODO(antoyo): handle alignment.
-        let atomic_load = self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
+        let atomic_load =
+            self.context.get_builtin_function(&format!("__atomic_load_{}", size.bytes()));
         let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 
-        let volatile_const_void_ptr_type = self.context.new_type::<()>()
-            .make_const()
-            .make_volatile()
-            .make_pointer();
-        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
-        self.context.new_call(None, atomic_load, &[ptr, ordering])
+        let volatile_const_void_ptr_type =
+            self.context.new_type::<()>().make_const().make_volatile().make_pointer();
+        let ptr = self.context.new_cast(self.location, ptr, volatile_const_void_ptr_type);
+        self.context.new_call(self.location, atomic_load, &[ptr, ordering])
     }
 
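`atomic_load` (and `atomic_store` further down) lower to the `__atomic_load_N` / `__atomic_store_N` builtins, passing the memory ordering as an integer argument. What those calls express, in safe Rust:

    use std::sync::atomic::{AtomicU32, Ordering};

    fn roundtrip(cell: &AtomicU32) -> u32 {
        // Roughly: __atomic_store_4(ptr, 42, __ATOMIC_RELEASE)
        cell.store(42, Ordering::Release);
        // Roughly: __atomic_load_4(ptr, __ATOMIC_ACQUIRE)
        cell.load(Ordering::Acquire)
    }

    fn main() {
        assert_eq!(roundtrip(&AtomicU32::new(0)), 42);
    }
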
-    fn load_operand(&mut self, place: PlaceRef<'tcx, RValue<'gcc>>) -> OperandRef<'tcx, RValue<'gcc>> {
+    fn load_operand(
+        &mut self,
+        place: PlaceRef<'tcx, RValue<'gcc>>,
+    ) -> OperandRef<'tcx, RValue<'gcc>> {
         assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
         }
 
-        fn scalar_load_metadata<'a, 'gcc, 'tcx>(bx: &mut Builder<'a, 'gcc, 'tcx>, load: RValue<'gcc>, scalar: &abi::Scalar) {
+        fn scalar_load_metadata<'a, 'gcc, 'tcx>(
+            bx: &mut Builder<'a, 'gcc, 'tcx>,
+            load: RValue<'gcc>,
+            scalar: &abi::Scalar,
+        ) {
             let vr = scalar.valid_range(bx);
             match scalar.primitive() {
                 abi::Int(..) => {
@@ -817,49 +998,50 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             }
         }
 
-        let val =
-            if let Some(llextra) = place.llextra {
-                OperandValue::Ref(place.llval, Some(llextra), place.align)
-            }
-            else if place.layout.is_gcc_immediate() {
-                let load = self.load(
-                    place.layout.gcc_type(self),
-                    place.llval,
-                    place.align,
-                );
-                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
-                    scalar_load_metadata(self, load, scalar);
-                }
-                OperandValue::Immediate(self.to_immediate(load, place.layout))
+        let val = if let Some(llextra) = place.llextra {
+            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        } else if place.layout.is_gcc_immediate() {
+            let load = self.load(place.layout.gcc_type(self), place.llval, place.align);
+            if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                scalar_load_metadata(self, load, scalar);
             }
-            else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
-                let b_offset = a.size(self).align_to(b.align(self).abi);
-
-                let mut load = |i, scalar: &abi::Scalar, align| {
-                    let llptr = if i == 0 {
-                        place.llval
-                    } else {
-                        self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
-                    };
-                    let llty = place.layout.scalar_pair_element_gcc_type(self, i);
-                    let load = self.load(llty, llptr, align);
-                    scalar_load_metadata(self, load, scalar);
-                    if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
+            OperandValue::Immediate(self.to_immediate(load, place.layout))
+        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+            let b_offset = a.size(self).align_to(b.align(self).abi);
+
+            let mut load = |i, scalar: &abi::Scalar, align| {
+                let llptr = if i == 0 {
+                    place.llval
+                } else {
+                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
                 };
-
-                OperandValue::Pair(
-                    load(0, a, place.align),
-                    load(1, b, place.align.restrict_for_offset(b_offset)),
-                )
-            }
-            else {
-                OperandValue::Ref(place.llval, None, place.align)
+                let llty = place.layout.scalar_pair_element_gcc_type(self, i);
+                let load = self.load(llty, llptr, align);
+                scalar_load_metadata(self, load, scalar);
+                if scalar.is_bool() {
+                    self.trunc(load, self.type_i1())
+                } else {
+                    load
+                }
             };
 
+            OperandValue::Pair(
+                load(0, a, place.align),
+                load(1, b, place.align.restrict_for_offset(b_offset)),
+            )
+        } else {
+            OperandValue::Ref(place.llval, None, place.align)
+        };
+
         OperandRef { val, layout: place.layout }
     }
 
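In the ScalarPair branch, the second component lives at `b_offset`, i.e. the first component's size rounded up to the second's alignment. The arithmetic, as a small sketch:

    // Round `size` up to a power-of-two `align`.
    fn align_to(size: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // A pair like (u8, u32): the second element starts at offset 4,
        // not 1, so the second load's pointer is advanced by 4 bytes.
        assert_eq!(align_to(1, 4), 4);
    }
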
-    fn write_operand_repeatedly(&mut self, cg_elem: OperandRef<'tcx, RValue<'gcc>>, count: u64, dest: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn write_operand_repeatedly(
+        &mut self,
+        cg_elem: OperandRef<'tcx, RValue<'gcc>>,
+        count: u64,
+        dest: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         let zero = self.const_usize(0);
         let count = self.const_usize(count);
         let start = dest.project_index(self, zero).llval;
@@ -870,7 +1052,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let next_bb = self.append_sibling_block("repeat_loop_next");
 
         let ptr_type = start.get_type();
-        let current = self.llbb().get_function().new_local(None, ptr_type, "loop_var");
+        let current = self.llbb().get_function().new_local(self.location, ptr_type, "loop_var");
         let current_val = current.to_rvalue();
         self.assign(current, start);
 
@@ -884,8 +1066,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let align = dest.align.restrict_for_offset(dest.layout.field(self.cx(), 0).size);
         cg_elem.val.store(self, PlaceRef::new_sized_aligned(current_val, cg_elem.layout, align));
 
-        let next = self.inbounds_gep(self.backend_type(cg_elem.layout), current.to_rvalue(), &[self.const_usize(1)]);
-        self.llbb().add_assignment(None, current, next);
+        let next = self.inbounds_gep(
+            self.backend_type(cg_elem.layout),
+            current.to_rvalue(),
+            &[self.const_usize(1)],
+        );
+        self.llbb().add_assignment(self.location, current, next);
         self.br(header_bb);
 
         self.switch_to_block(next_bb);
@@ -903,75 +1089,100 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.store_with_flags(val, ptr, align, MemFlags::empty())
     }
 
-    fn store_with_flags(&mut self, val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, _flags: MemFlags) -> RValue<'gcc> {
+    fn store_with_flags(
+        &mut self,
+        val: RValue<'gcc>,
+        ptr: RValue<'gcc>,
+        align: Align,
+        _flags: MemFlags,
+    ) -> RValue<'gcc> {
         let ptr = self.check_store(val, ptr);
-        let destination = ptr.dereference(None);
+        let destination = ptr.dereference(self.location);
         // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast
         // the pointer to the aligned type so the store gets the proper alignment.
         let destination_type = destination.to_rvalue().get_type().unqualified();
         let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer();
-        let aligned_destination = self.cx.context.new_bitcast(None, ptr, aligned_type);
-        let aligned_destination = aligned_destination.dereference(None);
-        self.llbb().add_assignment(None, aligned_destination, val);
+        let aligned_destination = self.cx.context.new_bitcast(self.location, ptr, aligned_type);
+        let aligned_destination = aligned_destination.dereference(self.location);
+        self.llbb().add_assignment(self.location, aligned_destination, val);
         // TODO(antoyo): handle align and flags.
         // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here?
         self.cx.context.new_rvalue_zero(self.type_i32())
     }
 
-    fn atomic_store(&mut self, value: RValue<'gcc>, ptr: RValue<'gcc>, order: AtomicOrdering, size: Size) {
+    fn atomic_store(
+        &mut self,
+        value: RValue<'gcc>,
+        ptr: RValue<'gcc>,
+        order: AtomicOrdering,
+        size: Size,
+    ) {
         // TODO(antoyo): handle alignment.
-        let atomic_store = self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
+        let atomic_store =
+            self.context.get_builtin_function(&format!("__atomic_store_{}", size.bytes()));
         let ordering = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
-        let volatile_const_void_ptr_type = self.context.new_type::<()>()
-            .make_volatile()
-            .make_pointer();
-        let ptr = self.context.new_cast(None, ptr, volatile_const_void_ptr_type);
+        let volatile_const_void_ptr_type =
+            self.context.new_type::<()>().make_volatile().make_pointer();
+        let ptr = self.context.new_cast(self.location, ptr, volatile_const_void_ptr_type);
 
         // FIXME(antoyo): fix libgccjit to allow comparing an integer type with an aligned integer type because
         // the following cast is required to avoid this error:
         // gcc_jit_context_new_call: mismatching types for argument 2 of function "__atomic_store_4": assignment to param arg1 (type: int) from loadedValue3577 (type: unsigned int  __attribute__((aligned(4))))
         let int_type = atomic_store.get_param(1).to_rvalue().get_type();
-        let value = self.context.new_cast(None, value, int_type);
-        self.llbb()
-            .add_eval(None, self.context.new_call(None, atomic_store, &[ptr, value, ordering]));
+        let value = self.context.new_cast(self.location, value, int_type);
+        self.llbb().add_eval(
+            self.location,
+            self.context.new_call(self.location, atomic_store, &[ptr, value, ordering]),
+        );
     }
 
-    fn gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+    fn gep(
+        &mut self,
+        typ: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        indices: &[RValue<'gcc>],
+    ) -> RValue<'gcc> {
         // NOTE: due to opaque pointers now being used, we need to cast here.
-        let ptr = self.context.new_cast(None, ptr, typ.make_pointer());
+        let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
         let ptr_type = ptr.get_type();
         let mut pointee_type = ptr.get_type();
         // NOTE: we cannot use array indexing here like in inbounds_gep because array indexing is
         // always considered in bounds in GCC (TODO(antoyo): to be verified).
         // So, we have to cast to a number.
-        let mut result = self.context.new_bitcast(None, ptr, self.sizet_type);
+        let mut result = self.context.new_bitcast(self.location, ptr, self.sizet_type);
         // FIXME(antoyo): if there were more than 1 index, this code is probably wrong and would
         // require dereferencing the pointer.
         for index in indices {
             pointee_type = pointee_type.get_pointee().expect("pointee type");
-            #[cfg(feature="master")]
+            #[cfg(feature = "master")]
             let pointee_size = {
                 let size = self.cx.context.new_sizeof(pointee_type);
-                self.context.new_cast(None, size, index.get_type())
+                self.context.new_cast(self.location, size, index.get_type())
             };
-            #[cfg(not(feature="master"))]
-            let pointee_size = self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
+            #[cfg(not(feature = "master"))]
+            let pointee_size =
+                self.context.new_rvalue_from_int(index.get_type(), pointee_type.get_size() as i32);
             result = result + self.gcc_int_cast(*index * pointee_size, self.sizet_type);
         }
-        self.context.new_bitcast(None, result, ptr_type)
+        self.context.new_bitcast(self.location, result, ptr_type)
     }
 
-    fn inbounds_gep(&mut self, typ: Type<'gcc>, ptr: RValue<'gcc>, indices: &[RValue<'gcc>]) -> RValue<'gcc> {
+    fn inbounds_gep(
+        &mut self,
+        typ: Type<'gcc>,
+        ptr: RValue<'gcc>,
+        indices: &[RValue<'gcc>],
+    ) -> RValue<'gcc> {
         // NOTE: due to opaque pointers now being used, we need to cast here.
-        let ptr = self.context.new_cast(None, ptr, typ.make_pointer());
+        let ptr = self.context.new_cast(self.location, ptr, typ.make_pointer());
         // NOTE: array indexing is always considered in bounds in GCC (TODO(antoyo): to be verified).
         let mut indices = indices.into_iter();
         let index = indices.next().expect("first index in inbounds_gep");
-        let mut result = self.context.new_array_access(None, ptr, *index);
+        let mut result = self.context.new_array_access(self.location, ptr, *index);
         for index in indices {
-            result = self.context.new_array_access(None, result, *index);
+            result = self.context.new_array_access(self.location, result, *index);
         }
-        result.get_address(None)
+        result.get_address(self.location)
     }
 
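Because GCC treats array indexing as always in bounds, `gep` instead performs the address arithmetic on integers: cast the pointer to `size_t`, add `index * sizeof(pointee)` for each index, and cast back. A plain-Rust sketch of that computation (illustrative; the real codegen stays inside gccjit):

    fn gep<T>(ptr: *const T, index: usize) -> *const T {
        // Pointer as integer + index * sizeof(T), cast back to a pointer,
        // like the bitcast-to-sizet dance above.
        (ptr as usize + index * std::mem::size_of::<T>()) as *const T
    }

    fn main() {
        let xs = [10u32, 20, 30];
        let p = gep(xs.as_ptr(), 2);
        assert_eq!(p, xs.as_ptr().wrapping_add(2));
        assert_eq!(unsafe { *p }, 30);
    }
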
     /* Casts */
@@ -986,32 +1197,32 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             // TODO(antoyo): nothing to do as it is only for LLVM?
             return value;
         }
-        self.context.new_cast(None, value, dest_ty)
+        self.context.new_cast(self.location, value, dest_ty)
     }
 
     fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_float_to_uint_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_float_to_uint_cast(value, dest_ty))
     }
 
     fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_float_to_int_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_float_to_int_cast(value, dest_ty))
     }
 
     fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_uint_to_float_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_uint_to_float_cast(value, dest_ty))
     }
 
     fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_int_to_float_cast(value, dest_ty)
+        set_rvalue_location(self, self.gcc_int_to_float_cast(value, dest_ty))
     }
 
     fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): make sure it truncates.
-        self.context.new_cast(None, value, dest_ty)
+        set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty))
     }
 
     fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(None, value, dest_ty)
+        set_rvalue_location(self, self.context.new_cast(self.location, value, dest_ty))
     }
 
     fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
@@ -1028,7 +1239,12 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cx.const_bitcast(value, dest_ty)
     }
 
-    fn intcast(&mut self, value: RValue<'gcc>, dest_typ: Type<'gcc>, _is_signed: bool) -> RValue<'gcc> {
+    fn intcast(
+        &mut self,
+        value: RValue<'gcc>,
+        dest_typ: Type<'gcc>,
+        _is_signed: bool,
+    ) -> RValue<'gcc> {
         // NOTE: is_signed is for value, not dest_typ.
         self.gcc_int_cast(value, dest_typ)
     }
@@ -1039,13 +1255,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             (false, true) => {
                 // NOTE: Projecting a field of a pointer type will attempt a cast from a signed char to
                 // a pointer, which is not supported by gccjit.
-                return self.cx.context.new_cast(None, self.inttoptr(value, val_type.make_pointer()), dest_ty);
-            },
+                self.cx.context.new_cast(
+                    self.location,
+                    self.inttoptr(value, val_type.make_pointer()),
+                    dest_ty,
+                )
+            }
             (false, false) => {
                 // When they are not pointers, we want a transmute (or reinterpret_cast).
                 self.bitcast(value, dest_ty)
-            },
-            (true, true) => self.cx.context.new_cast(None, value, dest_ty),
+            }
+            (true, true) => self.cx.context.new_cast(self.location, value, dest_ty),
             (true, false) => unimplemented!(),
         }
     }
@@ -1056,11 +1276,19 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn fcmp(&mut self, op: RealPredicate, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
-        self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+        self.context.new_comparison(self.location, op.to_gcc_comparison(), lhs, rhs)
     }
 
     /* Miscellaneous instructions */
-    fn memcpy(&mut self, dst: RValue<'gcc>, _dst_align: Align, src: RValue<'gcc>, _src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+    fn memcpy(
+        &mut self,
+        dst: RValue<'gcc>,
+        _dst_align: Align,
+        src: RValue<'gcc>,
+        _src_align: Align,
+        size: RValue<'gcc>,
+        flags: MemFlags,
+    ) {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_size_t(), false);
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
@@ -1068,10 +1296,21 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let src = self.pointercast(src, self.type_ptr_to(self.type_void()));
         let memcpy = self.context.get_builtin_function("memcpy");
         // TODO(antoyo): handle aligns and is_volatile.
-        self.block.add_eval(None, self.context.new_call(None, memcpy, &[dst, src, size]));
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memcpy, &[dst, src, size]),
+        );
     }
 
-    fn memmove(&mut self, dst: RValue<'gcc>, dst_align: Align, src: RValue<'gcc>, src_align: Align, size: RValue<'gcc>, flags: MemFlags) {
+    fn memmove(
+        &mut self,
+        dst: RValue<'gcc>,
+        dst_align: Align,
+        src: RValue<'gcc>,
+        src_align: Align,
+        size: RValue<'gcc>,
+        flags: MemFlags,
+    ) {
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memmove.
             let val = self.load(src.get_type().get_pointee().expect("get_pointee"), src, src_align);
@@ -1086,35 +1325,53 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
 
         let memmove = self.context.get_builtin_function("memmove");
         // TODO(antoyo): handle is_volatile.
-        self.block.add_eval(None, self.context.new_call(None, memmove, &[dst, src, size]));
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memmove, &[dst, src, size]),
+        );
     }
 
-    fn memset(&mut self, ptr: RValue<'gcc>, fill_byte: RValue<'gcc>, size: RValue<'gcc>, _align: Align, flags: MemFlags) {
+    fn memset(
+        &mut self,
+        ptr: RValue<'gcc>,
+        fill_byte: RValue<'gcc>,
+        size: RValue<'gcc>,
+        _align: Align,
+        flags: MemFlags,
+    ) {
         let _is_volatile = flags.contains(MemFlags::VOLATILE);
         let ptr = self.pointercast(ptr, self.type_i8p());
         let memset = self.context.get_builtin_function("memset");
         // TODO(antoyo): handle align and is_volatile.
-        let fill_byte = self.context.new_cast(None, fill_byte, self.i32_type);
+        let fill_byte = self.context.new_cast(self.location, fill_byte, self.i32_type);
         let size = self.intcast(size, self.type_size_t(), false);
-        self.block.add_eval(None, self.context.new_call(None, memset, &[ptr, fill_byte, size]));
+        self.block.add_eval(
+            self.location,
+            self.context.new_call(self.location, memset, &[ptr, fill_byte, size]),
+        );
     }
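
// A rough standalone model (illustration only) of the memset lowering above:
// the destination is cast to a byte pointer, the fill value widened to int
// (hence the i32 cast), the length converted to size_t, and the builtin is
// called only for its side effect (add_eval discards memset's return value).
unsafe fn memset_model(ptr: *mut u8, fill_byte: u8, size: usize) {
    let fill = fill_byte as i32; // libc memset takes the fill value as an int
    for i in 0..size {
        *ptr.add(i) = fill as u8;
    }
}

fn main() {
    let mut buf = [0u8; 4];
    unsafe { memset_model(buf.as_mut_ptr(), 0xAB, buf.len()) };
    assert_eq!(buf, [0xAB, 0xAB, 0xAB, 0xAB]);
}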
 
-    fn select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, mut else_val: RValue<'gcc>) -> RValue<'gcc> {
+    fn select(
+        &mut self,
+        cond: RValue<'gcc>,
+        then_val: RValue<'gcc>,
+        mut else_val: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         let func = self.current_func();
-        let variable = func.new_local(None, then_val.get_type(), "selectVar");
+        let variable = func.new_local(self.location, then_val.get_type(), "selectVar");
         let then_block = func.new_block("then");
         let else_block = func.new_block("else");
         let after_block = func.new_block("after");
-        self.llbb().end_with_conditional(None, cond, then_block, else_block);
+        self.llbb().end_with_conditional(self.location, cond, then_block, else_block);
 
-        then_block.add_assignment(None, variable, then_val);
-        then_block.end_with_jump(None, after_block);
+        then_block.add_assignment(self.location, variable, then_val);
+        then_block.end_with_jump(self.location, after_block);
 
         if !then_val.get_type().is_compatible_with(else_val.get_type()) {
-            else_val = self.context.new_cast(None, else_val, then_val.get_type());
+            else_val = self.context.new_cast(self.location, else_val, then_val.get_type());
         }
-        else_block.add_assignment(None, variable, else_val);
-        else_block.end_with_jump(None, after_block);
+        else_block.add_assignment(self.location, variable, else_val);
+        else_block.end_with_jump(self.location, after_block);
 
         // NOTE: since jumps were added in a place rustc does not expect, the current block in the
         // state needs to be updated.
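
// A scalar sketch (standalone Rust; illustration only) of the shape `select`
// builds: gccjit has no select instruction, so the value is routed through a
// fresh local and an explicit conditional branch between new basic blocks.
fn select_shape(cond: bool, then_val: i32, else_val: i32) -> i32 {
    let select_var; // the "selectVar" local created with new_local
    if cond {
        select_var = then_val; // then_block: add_assignment, then end_with_jump
    } else {
        select_var = else_val; // else_block: add_assignment, then end_with_jump
    }
    select_var // read back in after_block
}

fn main() {
    assert_eq!(select_shape(true, 1, 2), 1);
}
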
@@ -1128,19 +1385,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
-        self.context.new_vector_access(None, vec, idx).to_rvalue()
+        self.context.new_vector_access(self.location, vec, idx).to_rvalue()
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     fn extract_element(&mut self, vec: RValue<'gcc>, idx: RValue<'gcc>) -> RValue<'gcc> {
-        let vector_type = vec.get_type().unqualified().dyncast_vector().expect("Called extract_element on a non-vector type");
+        let vector_type = vec
+            .get_type()
+            .unqualified()
+            .dyncast_vector()
+            .expect("Called extract_element on a non-vector type");
         let element_type = vector_type.get_element_type();
         let vec_num_units = vector_type.get_num_units();
-        let array_type = self.context.new_array_type(None, element_type, vec_num_units as u64);
-        let array = self.context.new_bitcast(None, vec, array_type).to_rvalue();
-        self.context.new_array_access(None, array, idx).to_rvalue()
+        let array_type =
+            self.context.new_array_type(self.location, element_type, vec_num_units as u64);
+        let array = self.context.new_bitcast(self.location, vec, array_type).to_rvalue();
+        self.context.new_array_access(self.location, array, idx).to_rvalue()
     }
 
     fn vector_splat(&mut self, _num_elts: usize, _elt: RValue<'gcc>) -> RValue<'gcc> {
@@ -1153,82 +1415,85 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let value_type = aggregate_value.get_type();
 
         if value_type.dyncast_array().is_some() {
-            let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-            let element = self.context.new_array_access(None, aggregate_value, index);
-            element.get_address(None)
-        }
-        else if value_type.dyncast_vector().is_some() {
+            let index = self
+                .context
+                .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            let element = self.context.new_array_access(self.location, aggregate_value, index);
+            element.get_address(self.location)
+        } else if value_type.dyncast_vector().is_some() {
             panic!();
-        }
-        else if let Some(pointer_type) = value_type.get_pointee() {
+        } else if let Some(pointer_type) = value_type.get_pointee() {
             if let Some(struct_type) = pointer_type.is_struct() {
             // NOTE: hack to work around a limitation of the rustc API: see comment on
                 // CodegenCx.structs_as_pointer
-                aggregate_value.dereference_field(None, struct_type.get_field(idx as i32)).to_rvalue()
-            }
-            else {
+                aggregate_value
+                    .dereference_field(self.location, struct_type.get_field(idx as i32))
+                    .to_rvalue()
+            } else {
                 panic!("Unexpected type {:?}", value_type);
             }
-        }
-        else if let Some(struct_type) = value_type.is_struct() {
-            aggregate_value.access_field(None, struct_type.get_field(idx as i32)).to_rvalue()
-        }
-        else {
+        } else if let Some(struct_type) = value_type.is_struct() {
+            aggregate_value
+                .access_field(self.location, struct_type.get_field(idx as i32))
+                .to_rvalue()
+        } else {
             panic!("Unexpected type {:?}", value_type);
         }
     }
 
-    fn insert_value(&mut self, aggregate_value: RValue<'gcc>, value: RValue<'gcc>, idx: u64) -> RValue<'gcc> {
+    fn insert_value(
+        &mut self,
+        aggregate_value: RValue<'gcc>,
+        value: RValue<'gcc>,
+        idx: u64,
+    ) -> RValue<'gcc> {
         // FIXME(antoyo): it would be better if the API only called this on structs, not on arrays.
         assert_eq!(idx as usize as u64, idx);
         let value_type = aggregate_value.get_type();
 
-        let lvalue =
-            if value_type.dyncast_array().is_some() {
-                let index = self.context.new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
-                self.context.new_array_access(None, aggregate_value, index)
-            }
-            else if value_type.dyncast_vector().is_some() {
-                panic!();
-            }
-            else if let Some(pointer_type) = value_type.get_pointee() {
-                if let Some(struct_type) = pointer_type.is_struct() {
-                    // NOTE: hack to workaround a limitation of the rustc API: see comment on
-                    // CodegenCx.structs_as_pointer
-                    aggregate_value.dereference_field(None, struct_type.get_field(idx as i32))
-                }
-                else {
-                    panic!("Unexpected type {:?}", value_type);
-                }
-            }
-            else {
+        let lvalue = if value_type.dyncast_array().is_some() {
+            let index = self
+                .context
+                .new_rvalue_from_long(self.u64_type, i64::try_from(idx).expect("i64::try_from"));
+            self.context.new_array_access(self.location, aggregate_value, index)
+        } else if value_type.dyncast_vector().is_some() {
+            panic!();
+        } else if let Some(pointer_type) = value_type.get_pointee() {
+            if let Some(struct_type) = pointer_type.is_struct() {
+                // NOTE: hack to work around a limitation of the rustc API: see comment on
+                // CodegenCx.structs_as_pointer
+                aggregate_value.dereference_field(self.location, struct_type.get_field(idx as i32))
+            } else {
                 panic!("Unexpected type {:?}", value_type);
-            };
+            }
+        } else {
+            panic!("Unexpected type {:?}", value_type);
+        };
 
         let lvalue_type = lvalue.to_rvalue().get_type();
         let value =
             // NOTE: sometimes, rustc will create a value with the wrong type.
             if lvalue_type != value.get_type() {
-                self.context.new_cast(None, value, lvalue_type)
+                self.context.new_cast(self.location, value, lvalue_type)
             }
             else {
                 value
             };
 
-        self.llbb().add_assignment(None, lvalue, value);
+        self.llbb().add_assignment(self.location, lvalue, value);
 
         aggregate_value
     }
 
     fn set_personality_fn(&mut self, _personality: RValue<'gcc>) {
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         {
             let personality = self.rvalue_as_function(_personality);
             self.current_func().set_personality_function(personality);
         }
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     fn cleanup_landing_pad(&mut self, pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
         self.set_personality_fn(pers_fn);
 
@@ -1236,23 +1501,27 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         // generate a try/finally instead of a try/catch for this block.
         self.cleanup_blocks.borrow_mut().insert(self.block);
 
-        let eh_pointer_builtin = self.cx.context.get_target_builtin_function("__builtin_eh_pointer");
+        let eh_pointer_builtin =
+            self.cx.context.get_target_builtin_function("__builtin_eh_pointer");
         let zero = self.cx.context.new_rvalue_zero(self.int_type);
-        let ptr = self.cx.context.new_call(None, eh_pointer_builtin, &[zero]);
+        let ptr = self.cx.context.new_call(self.location, eh_pointer_builtin, &[zero]);
 
         let value1_type = self.u8_type.make_pointer();
-        let ptr = self.cx.context.new_cast(None, ptr, value1_type);
+        let ptr = self.cx.context.new_cast(self.location, ptr, value1_type);
         let value1 = ptr;
         let value2 = zero; // TODO(antoyo): set the proper value here (the type of exception?).
 
         (value1, value2)
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     fn cleanup_landing_pad(&mut self, _pers_fn: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
-        let value1 = self.current_func().new_local(None, self.u8_type.make_pointer(), "landing_pad0")
-                .to_rvalue();
-        let value2 = self.current_func().new_local(None, self.i32_type, "landing_pad1").to_rvalue();
+        let value1 = self
+            .current_func()
+            .new_local(self.location, self.u8_type.make_pointer(), "landing_pad0")
+            .to_rvalue();
+        let value2 =
+            self.current_func().new_local(self.location, self.i32_type, "landing_pad1").to_rvalue();
         (value1, value2)
     }
 
@@ -1261,16 +1530,17 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cleanup_landing_pad(pers_fn)
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     fn resume(&mut self, exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
         let exn_type = exn0.get_type();
-        let exn = self.context.new_cast(None, exn0, exn_type);
+        let exn = self.context.new_cast(self.location, exn0, exn_type);
         let unwind_resume = self.context.get_target_builtin_function("__builtin_unwind_resume");
-        self.llbb().add_eval(None, self.context.new_call(None, unwind_resume, &[exn]));
+        self.llbb()
+            .add_eval(self.location, self.context.new_call(self.location, unwind_resume, &[exn]));
         self.unreachable();
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     fn resume(&mut self, _exn0: RValue<'gcc>, _exn1: RValue<'gcc>) {
         self.unreachable();
     }
@@ -1297,68 +1567,82 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     // Atomic Operations
-    fn atomic_cmpxchg(&mut self, dst: RValue<'gcc>, cmp: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering, failure_order: AtomicOrdering, weak: bool) -> (RValue<'gcc>, RValue<'gcc>) {
+    fn atomic_cmpxchg(
+        &mut self,
+        dst: RValue<'gcc>,
+        cmp: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+        failure_order: AtomicOrdering,
+        weak: bool,
+    ) -> (RValue<'gcc>, RValue<'gcc>) {
         let expected = self.current_func().new_local(None, cmp.get_type(), "expected");
         self.llbb().add_assignment(None, expected, cmp);
         // NOTE: gcc doesn't support a failure memory model that is stronger than the success
         // memory model.
-        let order =
-            if failure_order as i32 > order as i32 {
-                failure_order
-            }
-            else {
-                order
-            };
+        let order = if failure_order as i32 > order as i32 { failure_order } else { order };
         let success = self.compare_exchange(dst, expected, src, order, failure_order, weak);
 
         // NOTE: since success contains the call to the intrinsic, it must be added to the basic block before
         // expected so that we store expected after the call.
-        let success_var = self.current_func().new_local(None, self.bool_type, "success");
-        self.llbb().add_assignment(None, success_var, success);
+        let success_var = self.current_func().new_local(self.location, self.bool_type, "success");
+        self.llbb().add_assignment(self.location, success_var, success);
 
         (expected.to_rvalue(), success_var.to_rvalue())
     }
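
// A self-contained sketch (hypothetical enum; the real code compares rustc's
// AtomicOrdering discriminants) of the ordering clamp above: GCC rejects a
// failure ordering stronger than the success ordering, so the success
// ordering is strengthened to whichever of the two is stronger.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Order { Relaxed, Acquire, Release, AcqRel, SeqCst }

fn clamp_success_order(success: Order, failure: Order) -> Order {
    if (failure as i32) > (success as i32) { failure } else { success }
}

fn main() {
    assert_eq!(clamp_success_order(Order::SeqCst, Order::Relaxed), Order::SeqCst);
    assert_eq!(clamp_success_order(Order::Acquire, Order::SeqCst), Order::SeqCst);
}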
 
-    fn atomic_rmw(&mut self, op: AtomicRmwBinOp, dst: RValue<'gcc>, src: RValue<'gcc>, order: AtomicOrdering) -> RValue<'gcc> {
+    fn atomic_rmw(
+        &mut self,
+        op: AtomicRmwBinOp,
+        dst: RValue<'gcc>,
+        src: RValue<'gcc>,
+        order: AtomicOrdering,
+    ) -> RValue<'gcc> {
         let size = src.get_type().get_size();
-        let name =
-            match op {
-                AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
-                AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
-                AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
-                AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
-                AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
-                AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
-                AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
-                AtomicRmwBinOp::AtomicMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
-                AtomicRmwBinOp::AtomicMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
-                AtomicRmwBinOp::AtomicUMax => return self.atomic_extremum(ExtremumOperation::Max, dst, src, order),
-                AtomicRmwBinOp::AtomicUMin => return self.atomic_extremum(ExtremumOperation::Min, dst, src, order),
-            };
-
+        let name = match op {
+            AtomicRmwBinOp::AtomicXchg => format!("__atomic_exchange_{}", size),
+            AtomicRmwBinOp::AtomicAdd => format!("__atomic_fetch_add_{}", size),
+            AtomicRmwBinOp::AtomicSub => format!("__atomic_fetch_sub_{}", size),
+            AtomicRmwBinOp::AtomicAnd => format!("__atomic_fetch_and_{}", size),
+            AtomicRmwBinOp::AtomicNand => format!("__atomic_fetch_nand_{}", size),
+            AtomicRmwBinOp::AtomicOr => format!("__atomic_fetch_or_{}", size),
+            AtomicRmwBinOp::AtomicXor => format!("__atomic_fetch_xor_{}", size),
+            AtomicRmwBinOp::AtomicMax => {
+                return self.atomic_extremum(ExtremumOperation::Max, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicMin => {
+                return self.atomic_extremum(ExtremumOperation::Min, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicUMax => {
+                return self.atomic_extremum(ExtremumOperation::Max, dst, src, order);
+            }
+            AtomicRmwBinOp::AtomicUMin => {
+                return self.atomic_extremum(ExtremumOperation::Min, dst, src, order);
+            }
+        };
 
         let atomic_function = self.context.get_builtin_function(name);
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
 
         let void_ptr_type = self.context.new_type::<*mut ()>();
         let volatile_void_ptr_type = void_ptr_type.make_volatile();
-        let dst = self.context.new_cast(None, dst, volatile_void_ptr_type);
+        let dst = self.context.new_cast(self.location, dst, volatile_void_ptr_type);
         // FIXME(antoyo): not sure why, but we have the wrong type here.
         let new_src_type = atomic_function.get_param(1).to_rvalue().get_type();
-        let src = self.context.new_cast(None, src, new_src_type);
-        let res = self.context.new_call(None, atomic_function, &[dst, src, order]);
-        self.context.new_cast(None, res, src.get_type())
+        let src = self.context.new_cast(self.location, src, new_src_type);
+        let res = self.context.new_call(self.location, atomic_function, &[dst, src, order]);
+        self.context.new_cast(self.location, res, src.get_type())
     }
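
// Sketch (plain Rust; helper name hypothetical) of the builtin-name scheme
// used above: the operand size in bytes selects one of GCC's sized atomic
// builtins, so an AtomicAdd on a 4-byte integer calls __atomic_fetch_add_4.
fn atomic_builtin_name(op: &str, size_in_bytes: u64) -> String {
    format!("__atomic_fetch_{}_{}", op, size_in_bytes)
}

fn main() {
    assert_eq!(atomic_builtin_name("add", 4), "__atomic_fetch_add_4");
    assert_eq!(atomic_builtin_name("xor", 8), "__atomic_fetch_xor_8");
}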
 
     fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope) {
-        let name =
-            match scope {
-                SynchronizationScope::SingleThread => "__atomic_signal_fence",
-                SynchronizationScope::CrossThread => "__atomic_thread_fence",
-            };
+        let name = match scope {
+            SynchronizationScope::SingleThread => "__atomic_signal_fence",
+            SynchronizationScope::CrossThread => "__atomic_thread_fence",
+        };
         let thread_fence = self.context.get_builtin_function(name);
         let order = self.context.new_rvalue_from_int(self.i32_type, order.to_gcc());
-        self.llbb().add_eval(None, self.context.new_call(None, thread_fence, &[order]));
+        self.llbb()
+            .add_eval(self.location, self.context.new_call(self.location, thread_fence, &[order]));
     }
 
     fn set_invariant_load(&mut self, load: RValue<'gcc>) {
@@ -1388,8 +1672,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let gcc_func = unsafe { std::mem::transmute(func) };
         let call = if self.functions.borrow().values().any(|value| *value == gcc_func) {
             self.function_call(func, args, funclet)
-        }
-        else {
+        } else {
             // If it's not a function that was defined, it's a function pointer.
             self.function_ptr_call(typ, func, args, funclet)
         };
@@ -1422,8 +1705,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn from_immediate(&mut self, val: Self::Value) -> Self::Value {
         if self.cx().val_ty(val) == self.cx().type_i1() {
             self.zext(val, self.cx().type_i8())
-        }
-        else {
+        } else {
             val
         }
     }
@@ -1443,13 +1725,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.fptoint_sat(true, val, dest_ty)
     }
 
-    fn instrprof_increment(&mut self, _fn_name: RValue<'gcc>, _hash: RValue<'gcc>, _num_counters: RValue<'gcc>, _index: RValue<'gcc>) {
+    fn instrprof_increment(
+        &mut self,
+        _fn_name: RValue<'gcc>,
+        _hash: RValue<'gcc>,
+        _num_counters: RValue<'gcc>,
+        _index: RValue<'gcc>,
+    ) {
         unimplemented!();
     }
 }
 
 impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
-    fn fptoint_sat(&mut self, signed: bool, val: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
+    fn fptoint_sat(
+        &mut self,
+        signed: bool,
+        val: RValue<'gcc>,
+        dest_ty: Type<'gcc>,
+    ) -> RValue<'gcc> {
         let src_ty = self.cx.val_ty(val);
         let (float_ty, int_ty) = if self.cx.type_kind(src_ty) == TypeKind::Vector {
             assert_eq!(self.cx.vector_length(src_ty), self.cx.vector_length(dest_ty));
@@ -1486,10 +1779,18 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
         let int_max = |signed: bool, int_width: u64| -> u128 {
             let shift_amount = 128 - int_width;
-            if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
+            if signed {
+                i128::MAX as u128 >> shift_amount
+            } else {
+                u128::MAX >> shift_amount
+            }
         };
         let int_min = |signed: bool, int_width: u64| -> i128 {
-            if signed { i128::MIN >> (128 - int_width) } else { 0 }
+            if signed {
+                i128::MIN >> (128 - int_width)
+            } else {
+                0
+            }
         };
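
// A standalone check (the two closures above, extracted; illustration only)
// that shifting the 128-bit extremes down really yields the target width's
// own bounds:
fn main() {
    let int_max = |signed: bool, int_width: u64| -> u128 {
        let shift_amount = 128 - int_width;
        if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount }
    };
    let int_min = |signed: bool, int_width: u64| -> i128 {
        if signed { i128::MIN >> (128 - int_width) } else { 0 }
    };
    assert_eq!(int_max(true, 32), i32::MAX as u128); // 2^31 - 1
    assert_eq!(int_max(false, 32), u32::MAX as u128); // 2^32 - 1
    assert_eq!(int_min(true, 32), i32::MIN as i128); // -2^31
    assert_eq!(int_min(false, 16), 0);
}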
 
         let compute_clamp_bounds_single = |signed: bool, int_width: u64| -> (u128, u128) {
@@ -1573,7 +1874,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let zero = maybe_splat(self, zero);
 
         // Step 1 ...
-        let fptosui_result = if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
+        let fptosui_result =
+            if signed { self.fptosi(val, dest_ty) } else { self.fptoui(val, dest_ty) };
         let less_or_nan = self.fcmp(RealPredicate::RealULT, val, f_min);
         let greater = self.fcmp(RealPredicate::RealOGT, val, f_max);
 
@@ -1609,8 +1911,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         }
     }
 
-    #[cfg(feature="master")]
-    pub fn shuffle_vector(&mut self, v1: RValue<'gcc>, v2: RValue<'gcc>, mask: RValue<'gcc>) -> RValue<'gcc> {
+    #[cfg(feature = "master")]
+    pub fn shuffle_vector(
+        &mut self,
+        v1: RValue<'gcc>,
+        v2: RValue<'gcc>,
+        mask: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         let struct_type = mask.get_type().is_struct().expect("mask should be of struct type");
 
         // TODO(antoyo): use a recursive unqualified() here.
@@ -1620,21 +1927,23 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         let mask_num_units = struct_type.get_field_count();
         let mut vector_elements = vec![];
-        let mask_element_type =
-            if element_type.is_integral() {
-                element_type
+        let mask_element_type = if element_type.is_integral() {
+            element_type
+        } else {
+            #[cfg(feature = "master")]
+            {
+                self.cx.type_ix(element_type.get_size() as u64 * 8)
             }
-            else {
-                #[cfg(feature="master")]
-                {
-                    self.cx.type_ix(element_type.get_size() as u64 * 8)
-                }
-                #[cfg(not(feature="master"))]
-                self.int_type
-            };
+            #[cfg(not(feature = "master"))]
+            self.int_type
+        };
         for i in 0..mask_num_units {
             let field = struct_type.get_field(i as i32);
-            vector_elements.push(self.context.new_cast(None, mask.access_field(None, field).to_rvalue(), mask_element_type));
+            vector_elements.push(self.context.new_cast(
+                self.location,
+                mask.access_field(self.location, field).to_rvalue(),
+                mask_element_type,
+            ));
         }
 
         // NOTE: the mask needs to be the same length as the input vectors, so add the missing
@@ -1644,53 +1953,84 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         }
 
         let result_type = self.context.new_vector_type(element_type, mask_num_units as u64);
-        let (v1, v2) =
-            if vec_num_units < mask_num_units {
-                // NOTE: the mask needs to be the same length as the input vectors, so join the 2
-                // vectors and create a dummy second vector.
-                let mut elements = vec![];
-                for i in 0..vec_num_units {
-                    elements.push(self.context.new_vector_access(None, v1, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
-                }
-                for i in 0..(mask_num_units - vec_num_units) {
-                    elements.push(self.context.new_vector_access(None, v2, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
-                }
-                let v1 = self.context.new_rvalue_from_vector(None, result_type, &elements);
-                let zero = self.context.new_rvalue_zero(element_type);
-                let v2 = self.context.new_rvalue_from_vector(None, result_type, &vec![zero; mask_num_units]);
-                (v1, v2)
+        let (v1, v2) = if vec_num_units < mask_num_units {
+            // NOTE: the mask needs to be the same length as the input vectors, so join the 2
+            // vectors and create a dummy second vector.
+            let mut elements = vec![];
+            for i in 0..vec_num_units {
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            v1,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
             }
-            else {
-                (v1, v2)
-            };
+            for i in 0..(mask_num_units - vec_num_units) {
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            v2,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
+            }
+            let v1 = self.context.new_rvalue_from_vector(self.location, result_type, &elements);
+            let zero = self.context.new_rvalue_zero(element_type);
+            let v2 = self.context.new_rvalue_from_vector(
+                self.location,
+                result_type,
+                &vec![zero; mask_num_units],
+            );
+            (v1, v2)
+        } else {
+            (v1, v2)
+        };
 
         let new_mask_num_units = std::cmp::max(mask_num_units, vec_num_units);
         let mask_type = self.context.new_vector_type(mask_element_type, new_mask_num_units as u64);
-        let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
-        let result = self.context.new_rvalue_vector_perm(None, v1, v2, mask);
+        let mask = self.context.new_rvalue_from_vector(self.location, mask_type, &vector_elements);
+        let result = self.context.new_rvalue_vector_perm(self.location, v1, v2, mask);
 
         if vec_num_units != mask_num_units {
             // NOTE: if padding was added, only select as many elements as the mask has, to
             // remove that padding in the result.
             let mut elements = vec![];
             for i in 0..mask_num_units {
-                elements.push(self.context.new_vector_access(None, result, self.context.new_rvalue_from_int(self.int_type, i as i32)).to_rvalue());
+                elements.push(
+                    self.context
+                        .new_vector_access(
+                            self.location,
+                            result,
+                            self.context.new_rvalue_from_int(self.int_type, i as i32),
+                        )
+                        .to_rvalue(),
+                );
             }
-            self.context.new_rvalue_from_vector(None, result_type, &elements)
-        }
-        else {
+            self.context.new_rvalue_from_vector(self.location, result_type, &elements)
+        } else {
             result
         }
     }
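
// A scalar model (standalone Rust; illustration only) of the padding path
// above: when the inputs are shorter than the mask, the lanes of v1 and v2
// are joined into one mask-length vector and permuted against a zero dummy.
fn shuffle_model(v1: &[i32], v2: &[i32], mask: &[usize]) -> Vec<i32> {
    let joined: Vec<i32> = v1.iter().chain(v2.iter()).copied().collect();
    mask.iter().map(|&i| joined[i]).collect() // new_rvalue_vector_perm, in spirit
}

fn main() {
    // two 2-lane vectors shuffled through a 4-lane mask
    assert_eq!(shuffle_model(&[1, 2], &[3, 4], &[3, 0, 2, 1]), vec![4, 1, 3, 2]);
}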
 
-    #[cfg(not(feature="master"))]
-    pub fn shuffle_vector(&mut self, _v1: RValue<'gcc>, _v2: RValue<'gcc>, _mask: RValue<'gcc>) -> RValue<'gcc> {
+    #[cfg(not(feature = "master"))]
+    pub fn shuffle_vector(
+        &mut self,
+        _v1: RValue<'gcc>,
+        _v2: RValue<'gcc>,
+        _mask: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce<F>(&mut self, src: RValue<'gcc>, op: F) -> RValue<'gcc>
-    where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+    where
+        F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>,
     {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_type = vector_type.get_element_type();
@@ -1704,130 +2044,178 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let mut shift = 1;
         let mut res = src;
         while shift < element_count {
-            let vector_elements: Vec<_> =
-                vector_elements.iter()
-                    .map(|i| self.context.new_rvalue_from_int(mask_element_type, ((i + shift) % element_count) as i32))
-                    .collect();
-            let mask = self.context.new_rvalue_from_vector(None, mask_type, &vector_elements);
-            let shifted = self.context.new_rvalue_vector_perm(None, res, res, mask);
+            let vector_elements: Vec<_> = vector_elements
+                .iter()
+                .map(|i| {
+                    self.context.new_rvalue_from_int(
+                        mask_element_type,
+                        ((i + shift) % element_count) as i32,
+                    )
+                })
+                .collect();
+            let mask =
+                self.context.new_rvalue_from_vector(self.location, mask_type, &vector_elements);
+            let shifted = self.context.new_rvalue_vector_perm(self.location, res, res, mask);
             shift *= 2;
             res = op(res, shifted, &self.context);
         }
-        self.context.new_vector_access(None, res, self.context.new_rvalue_zero(self.int_type))
+        self.context
+            .new_vector_access(self.location, res, self.context.new_rvalue_zero(self.int_type))
             .to_rvalue()
     }
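
// A scalar model (standalone Rust; illustration only) of the log2 reduction
// above: rotate the lanes by a doubling shift and combine, until lane 0
// holds the result of folding op over every lane.
fn vector_reduce_model(src: &[i32], op: impl Fn(i32, i32) -> i32) -> i32 {
    let n = src.len(); // a power of two, as for GCC vector types
    let mut res = src.to_vec();
    let mut shift = 1;
    while shift < n {
        // the permutation mask rotates the lanes left by `shift`
        let shifted: Vec<i32> = (0..n).map(|i| res[(i + shift) % n]).collect();
        res = res.iter().zip(&shifted).map(|(&a, &b)| op(a, b)).collect();
        shift *= 2;
    }
    res[0] // the builder reads lane 0 via new_vector_access
}

fn main() {
    assert_eq!(vector_reduce_model(&[1, 2, 3, 4], |a, b| a + b), 10);
}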
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce<F>(&mut self, _src: RValue<'gcc>, _op: F) -> RValue<'gcc>
-    where F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>
+    where
+        F: Fn(RValue<'gcc>, RValue<'gcc>, &'gcc Context<'gcc>) -> RValue<'gcc>,
     {
         unimplemented!();
     }
 
     pub fn vector_reduce_op(&mut self, src: RValue<'gcc>, op: BinaryOp) -> RValue<'gcc> {
-        self.vector_reduce(src, |a, b, context| context.new_binary_op(None, op, a.get_type(), a, b))
+        let loc = self.location.clone();
+        self.vector_reduce(src, |a, b, context| context.new_binary_op(loc, op, a.get_type(), a, b))
     }
 
-    pub fn vector_reduce_fadd_reassoc(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_reduce_fadd_reassoc(
+        &mut self,
+        _acc: RValue<'gcc>,
+        _src: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fadd(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        (0..element_count).into_iter()
-            .map(|i| self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
-                .to_rvalue())
+        (0..element_count)
+            .into_iter()
+            .map(|i| {
+                self.context
+                    .new_vector_access(
+                        self.location,
+                        src,
+                        self.context.new_rvalue_from_int(self.int_type, i as _),
+                    )
+                    .to_rvalue()
+            })
             .fold(acc, |x, i| x + i)
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fadd(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    pub fn vector_reduce_fmul_reassoc(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_reduce_fmul_reassoc(
+        &mut self,
+        _acc: RValue<'gcc>,
+        _src: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fmul(&mut self, acc: RValue<'gcc>, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        (0..element_count).into_iter()
-            .map(|i| self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
-                .to_rvalue())
+        (0..element_count)
+            .into_iter()
+            .map(|i| {
+                self.context
+                    .new_vector_access(
+                        self.location,
+                        src,
+                        self.context.new_rvalue_from_int(self.int_type, i as _),
+                    )
+                    .to_rvalue()
+            })
             .fold(acc, |x, i| x * i)
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fmul(&mut self, _acc: RValue<'gcc>, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!()
     }
 
     // Inspired by Hacker's Delight min implementation.
     pub fn vector_reduce_min(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let loc = self.location.clone();
         self.vector_reduce(src, |a, b, context| {
-            let differences_or_zeros = difference_or_zero(a, b, context);
-            context.new_binary_op(None, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
+            let differences_or_zeros = difference_or_zero(loc, a, b, context);
+            context.new_binary_op(loc, BinaryOp::Plus, b.get_type(), b, differences_or_zeros)
         })
     }
 
     // Inspired by Hacker's Delight max implementation.
     pub fn vector_reduce_max(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
+        let loc = self.location.clone();
         self.vector_reduce(src, |a, b, context| {
-            let differences_or_zeros = difference_or_zero(a, b, context);
-            context.new_binary_op(None, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
+            let differences_or_zeros = difference_or_zero(loc, a, b, context);
+            context.new_binary_op(loc, BinaryOp::Minus, a.get_type(), a, differences_or_zeros)
         })
     }
 
-    fn vector_extremum(&mut self, a: RValue<'gcc>, b: RValue<'gcc>, direction: ExtremumOperation) -> RValue<'gcc> {
+    fn vector_extremum(
+        &mut self,
+        a: RValue<'gcc>,
+        b: RValue<'gcc>,
+        direction: ExtremumOperation,
+    ) -> RValue<'gcc> {
         let vector_type = a.get_type();
 
         // mask out the NaNs in b and replace them with the corresponding lane in a, so when a and
         // b get compared & spliced together, we get the numeric values instead of NaNs.
-        let b_nan_mask = self.context.new_comparison(None, ComparisonOp::NotEquals, b, b);
+        let b_nan_mask = self.context.new_comparison(self.location, ComparisonOp::NotEquals, b, b);
         let mask_type = b_nan_mask.get_type();
-        let b_nan_mask_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, mask_type, b_nan_mask);
-        let a_cast = self.context.new_bitcast(None, a, mask_type);
-        let b_cast = self.context.new_bitcast(None, b, mask_type);
+        let b_nan_mask_inverted =
+            self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, mask_type, b_nan_mask);
+        let a_cast = self.context.new_bitcast(self.location, a, mask_type);
+        let b_cast = self.context.new_bitcast(self.location, b, mask_type);
         let res = (b_nan_mask & a_cast) | (b_nan_mask_inverted & b_cast);
-        let b = self.context.new_bitcast(None, res, vector_type);
+        let b = self.context.new_bitcast(self.location, res, vector_type);
 
         // now do the actual comparison
         let comparison_op = match direction {
             ExtremumOperation::Min => ComparisonOp::LessThan,
             ExtremumOperation::Max => ComparisonOp::GreaterThan,
         };
-        let cmp = self.context.new_comparison(None, comparison_op, a, b);
-        let cmp_inverted = self.context.new_unary_op(None, UnaryOp::BitwiseNegate, cmp.get_type(), cmp);
+        let cmp = self.context.new_comparison(self.location, comparison_op, a, b);
+        let cmp_inverted =
+            self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, cmp.get_type(), cmp);
         let res = (cmp & a_cast) | (cmp_inverted & res);
-        self.context.new_bitcast(None, res, vector_type)
+        self.context.new_bitcast(self.location, res, vector_type)
     }
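
// A scalar model (standalone Rust; illustration only) of the NaN splice
// above: lanes where b is NaN are replaced by the matching lane of a before
// the real comparison, so a numeric operand always beats a NaN.
fn fmin_nan_aware(a: f32, b: f32) -> f32 {
    let b = if b.is_nan() { a } else { b }; // the b_nan_mask splice
    if a < b { a } else { b }               // the ComparisonOp::LessThan splice
}

fn main() {
    assert_eq!(fmin_nan_aware(1.0, f32::NAN), 1.0);
    assert_eq!(fmin_nan_aware(2.0, 1.0), 1.0);
}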
 
     pub fn vector_fmin(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         self.vector_extremum(a, b, ExtremumOperation::Min)
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fmin(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue();
+        let mut acc = self
+            .context
+            .new_vector_access(self.location, src, self.context.new_rvalue_zero(self.int_type))
+            .to_rvalue();
         for i in 1..element_count {
-            let elem = self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
+            let elem = self
+                .context
+                .new_vector_access(
+                    self.location,
+                    src,
+                    self.context.new_rvalue_from_int(self.int_type, i as _),
+                )
                 .to_rvalue();
-            let cmp = self.context.new_comparison(None, ComparisonOp::LessThan, acc, elem);
+            let cmp = self.context.new_comparison(self.location, ComparisonOp::LessThan, acc, elem);
             acc = self.select(cmp, acc, elem);
         }
         acc
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fmin(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
@@ -1836,36 +2224,51 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         self.vector_extremum(a, b, ExtremumOperation::Max)
     }
 
-    #[cfg(feature="master")]
+    #[cfg(feature = "master")]
     pub fn vector_reduce_fmax(&mut self, src: RValue<'gcc>) -> RValue<'gcc> {
         let vector_type = src.get_type().unqualified().dyncast_vector().expect("vector type");
         let element_count = vector_type.get_num_units();
-        let mut acc = self.context.new_vector_access(None, src, self.context.new_rvalue_zero(self.int_type)).to_rvalue();
+        let mut acc = self
+            .context
+            .new_vector_access(self.location, src, self.context.new_rvalue_zero(self.int_type))
+            .to_rvalue();
         for i in 1..element_count {
-            let elem = self.context
-                .new_vector_access(None, src, self.context.new_rvalue_from_int(self.int_type, i as _))
+            let elem = self
+                .context
+                .new_vector_access(
+                    self.location,
+                    src,
+                    self.context.new_rvalue_from_int(self.int_type, i as _),
+                )
                 .to_rvalue();
-            let cmp = self.context.new_comparison(None, ComparisonOp::GreaterThan, acc, elem);
+            let cmp =
+                self.context.new_comparison(self.location, ComparisonOp::GreaterThan, acc, elem);
             acc = self.select(cmp, acc, elem);
         }
         acc
     }
 
-    #[cfg(not(feature="master"))]
+    #[cfg(not(feature = "master"))]
     pub fn vector_reduce_fmax(&mut self, _src: RValue<'gcc>) -> RValue<'gcc> {
         unimplemented!();
     }
 
-    pub fn vector_select(&mut self, cond: RValue<'gcc>, then_val: RValue<'gcc>, else_val: RValue<'gcc>) -> RValue<'gcc> {
+    pub fn vector_select(
+        &mut self,
+        cond: RValue<'gcc>,
+        then_val: RValue<'gcc>,
+        else_val: RValue<'gcc>,
+    ) -> RValue<'gcc> {
         // cond is a vector of integers, not of bools.
         let vector_type = cond.get_type().unqualified().dyncast_vector().expect("vector type");
         let num_units = vector_type.get_num_units();
         let element_type = vector_type.get_element_type();
 
-        #[cfg(feature="master")]
+        #[cfg(feature = "master")]
         let (cond, element_type) = {
             // TODO(antoyo): dyncast_vector should not require a call to unqualified.
-            let then_val_vector_type = then_val.get_type().unqualified().dyncast_vector().expect("vector type");
+            let then_val_vector_type =
+                then_val.get_type().unqualified().dyncast_vector().expect("vector type");
             let then_val_element_type = then_val_vector_type.get_element_type();
             let then_val_element_size = then_val_element_type.get_size();
 
@@ -1873,11 +2276,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // operation to work.
             if then_val_element_size != element_type.get_size() {
                 let new_element_type = self.type_ix(then_val_element_size as u64 * 8);
-                let new_vector_type = self.context.new_vector_type(new_element_type, num_units as u64);
-                let cond = self.context.convert_vector(None, cond, new_vector_type);
+                let new_vector_type =
+                    self.context.new_vector_type(new_element_type, num_units as u64);
+                let cond = self.context.convert_vector(self.location, cond, new_vector_type);
                 (cond, new_element_type)
-            }
-            else {
+            } else {
                 (cond, element_type)
             }
         };
@@ -1885,24 +2288,25 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let cond_type = cond.get_type();
 
         let zeros = vec![self.context.new_rvalue_zero(element_type); num_units];
-        let zeros = self.context.new_rvalue_from_vector(None, cond_type, &zeros);
+        let zeros = self.context.new_rvalue_from_vector(self.location, cond_type, &zeros);
 
         let result_type = then_val.get_type();
 
-        let masks = self.context.new_comparison(None, ComparisonOp::NotEquals, cond, zeros);
+        let masks =
+            self.context.new_comparison(self.location, ComparisonOp::NotEquals, cond, zeros);
         // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
         // the & operation work.
         let then_val = self.bitcast_if_needed(then_val, masks.get_type());
         let then_vals = masks & then_val;
 
         let minus_ones = vec![self.context.new_rvalue_from_int(element_type, -1); num_units];
-        let minus_ones = self.context.new_rvalue_from_vector(None, cond_type, &minus_ones);
+        let minus_ones = self.context.new_rvalue_from_vector(self.location, cond_type, &minus_ones);
         let inverted_masks = masks ^ minus_ones;
         // NOTE: sometimes, the type of else_val can be different than the type of then_val in
         // libgccjit (vector of int vs vector of int32_t), but they should be the same for the AND
         // operation to work.
         // TODO: remove bitcast now that vector types can be compared?
-        let else_val = self.context.new_bitcast(None, else_val, then_val.get_type());
+        let else_val = self.context.new_bitcast(self.location, else_val, then_val.get_type());
         let else_vals = inverted_masks & else_val;
 
         let res = then_vals | else_vals;
@@ -1910,26 +2314,26 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
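
// A scalar model (standalone Rust; illustration only) of the mask arithmetic
// above: a non-zero cond lane compares as all-ones, and the result is
// (mask & then) | (!mask & else), lane by lane.
fn select_lane(cond: i32, then_val: i32, else_val: i32) -> i32 {
    let mask = if cond != 0 { -1i32 } else { 0 }; // new_comparison against zeros
    let inverted = mask ^ -1; // masks ^ minus_ones
    (mask & then_val) | (inverted & else_val)
}

fn main() {
    assert_eq!(select_lane(1, 10, 20), 10);
    assert_eq!(select_lane(0, 10, 20), 20);
}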
 }
 
-fn difference_or_zero<'gcc>(a: RValue<'gcc>, b: RValue<'gcc>, context: &'gcc Context<'gcc>) -> RValue<'gcc> {
+fn difference_or_zero<'gcc>(
+    loc: Option<Location<'gcc>>,
+    a: RValue<'gcc>,
+    b: RValue<'gcc>,
+    context: &'gcc Context<'gcc>,
+) -> RValue<'gcc> {
     let difference = a - b;
-    let masks = context.new_comparison(None, ComparisonOp::GreaterThanEquals, b, a);
+    let masks = context.new_comparison(loc, ComparisonOp::GreaterThanEquals, b, a);
     // NOTE: masks is a vector of integers, but the values can be vectors of floats, so use bitcast to make
     // the & operation work.
     let a_type = a.get_type();
     let masks =
-        if masks.get_type() != a_type {
-            context.new_bitcast(None, masks, a_type)
-        }
-        else {
-            masks
-        };
+        if masks.get_type() != a_type { context.new_bitcast(loc, masks, a_type) } else { masks };
     difference & masks
 }
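
// A scalar model (standalone Rust; illustration only) of the branchless
// extremum trick: keep (a - b) only where b >= a, so b + ((a - b) & mask)
// is min(a, b) and a - ((a - b) & mask) is max(a, b), as used by
// vector_reduce_min and vector_reduce_max above.
fn difference_or_zero_scalar(a: i32, b: i32) -> i32 {
    let mask = if b >= a { -1 } else { 0 }; // GCC comparisons yield all-ones lanes
    a.wrapping_sub(b) & mask
}

fn main() {
    let (a, b) = (3, 7);
    assert_eq!(b + difference_or_zero_scalar(a, b), 3); // min
    assert_eq!(a - difference_or_zero_scalar(a, b), 7); // max
}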
 
 impl<'a, 'gcc, 'tcx> StaticBuilderMethods for Builder<'a, 'gcc, 'tcx> {
     fn get_static(&mut self, def_id: DefId) -> RValue<'gcc> {
         // Forward to the `get_static` method of `CodegenCx`
-        self.cx().get_static(def_id).get_address(None)
+        self.cx().get_static(def_id).get_address(self.location)
     }
 }
 
@@ -2009,15 +2413,14 @@ impl ToGccOrdering for AtomicOrdering {
     fn to_gcc(self) -> i32 {
         use MemOrdering::*;
 
-        let ordering =
-            match self {
-                AtomicOrdering::Unordered => __ATOMIC_RELAXED,
-                AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
-                AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
-                AtomicOrdering::Release => __ATOMIC_RELEASE,
-                AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
-                AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
-            };
+        let ordering = match self {
+            AtomicOrdering::Unordered => __ATOMIC_RELAXED,
+            AtomicOrdering::Relaxed => __ATOMIC_RELAXED, // TODO(antoyo): check if that's the same.
+            AtomicOrdering::Acquire => __ATOMIC_ACQUIRE,
+            AtomicOrdering::Release => __ATOMIC_RELEASE,
+            AtomicOrdering::AcquireRelease => __ATOMIC_ACQ_REL,
+            AtomicOrdering::SequentiallyConsistent => __ATOMIC_SEQ_CST,
+        };
         ordering as i32
     }
 }