Diffstat (limited to 'compiler/rustc_codegen_gcc/src/intrinsic/mod.rs')
-rw-r--r-- compiler/rustc_codegen_gcc/src/intrinsic/mod.rs | 1101
1 file changed, 590 insertions, 511 deletions
diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
index d43f5d74757..a6c8b72e851 100644
--- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
+++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
@@ -1,43 +1,48 @@
 pub mod llvm;
 mod simd;
 
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use std::iter;
 
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use gccjit::FunctionType;
 use gccjit::{ComparisonOp, Function, RValue, ToRValue, Type, UnaryOp};
-use rustc_codegen_ssa::MemFlags;
 use rustc_codegen_ssa::base::wants_msvc_seh;
 use rustc_codegen_ssa::common::IntPredicate;
+use rustc_codegen_ssa::errors::InvalidMonomorphization;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
-use rustc_codegen_ssa::traits::{ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
-#[cfg(feature="master")]
+use rustc_codegen_ssa::traits::{
+    ArgAbiMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods,
+};
+#[cfg(feature = "master")]
 use rustc_codegen_ssa::traits::{BaseTypeMethods, MiscMethods};
-use rustc_codegen_ssa::errors::InvalidMonomorphization;
+use rustc_codegen_ssa::MemFlags;
 use rustc_middle::bug;
-use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::ty::layout::LayoutOf;
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
-use rustc_span::{Span, Symbol, sym};
-use rustc_target::abi::HasDataLayout;
+use rustc_middle::ty::{self, Instance, Ty};
+use rustc_span::{sym, Span, Symbol};
 use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
-use rustc_target::spec::PanicStrategy;
-#[cfg(feature="master")]
+use rustc_target::abi::HasDataLayout;
+#[cfg(feature = "master")]
 use rustc_target::spec::abi::Abi;
+use rustc_target::spec::PanicStrategy;
 
-use crate::abi::GccType;
-#[cfg(feature="master")]
+#[cfg(feature = "master")]
 use crate::abi::FnAbiGccExt;
+use crate::abi::GccType;
 use crate::builder::Builder;
 use crate::common::{SignType, TypeReflection};
 use crate::context::CodegenCx;
-use crate::type_of::LayoutGccExt;
 use crate::intrinsic::simd::generic_simd_intrinsic;
+use crate::type_of::LayoutGccExt;
 
-fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) -> Option<Function<'gcc>> {
+fn get_simple_intrinsic<'gcc, 'tcx>(
+    cx: &CodegenCx<'gcc, 'tcx>,
+    name: Symbol,
+) -> Option<Function<'gcc>> {
     let gcc_name = match name {
         sym::sqrtf32 => "sqrtf",
         sym::sqrtf64 => "sqrt",
@@ -90,7 +95,14 @@ fn get_simple_intrinsic<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, name: Symbol) ->
 }
 
 impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_intrinsic_call(&mut self, instance: Instance<'tcx>, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, args: &[OperandRef<'tcx, RValue<'gcc>>], llresult: RValue<'gcc>, span: Span) -> Result<(), Instance<'tcx>> {
+    fn codegen_intrinsic_call(
+        &mut self,
+        instance: Instance<'tcx>,
+        fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
+        args: &[OperandRef<'tcx, RValue<'gcc>>],
+        llresult: RValue<'gcc>,
+        span: Span,
+    ) -> Result<(), Instance<'tcx>> {
         let tcx = self.tcx;
         let callee_ty = instance.ty(tcx, ty::ParamEnv::reveal_all());
 
@@ -110,268 +122,274 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
 
         let simple = get_simple_intrinsic(self, name);
-        let llval =
-            match name {
-                _ if simple.is_some() => {
-                    // FIXME(antoyo): remove this cast when the API supports function.
-                    let func = unsafe { std::mem::transmute(simple.expect("simple")) };
-                    self.call(self.type_void(), None, None, func, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None)
-                },
-                sym::likely => {
-                    self.expect(args[0].immediate(), true)
-                }
-                sym::unlikely => {
-                    self.expect(args[0].immediate(), false)
-                }
-                sym::is_val_statically_known => {
-                    let a = args[0].immediate();
-                    let builtin = self.context.get_builtin_function("__builtin_constant_p");
-                    let res = self.context.new_call(None, builtin, &[a]);
-                    self.icmp(IntPredicate::IntEQ, res, self.const_i32(0))
-                }
-                sym::catch_unwind => {
-                    try_intrinsic(
-                        self,
-                        args[0].immediate(),
-                        args[1].immediate(),
-                        args[2].immediate(),
-                        llresult,
-                    );
-                    return Ok(());
-                }
-                sym::breakpoint => {
-                    unimplemented!();
-                }
-                sym::va_copy => {
-                    unimplemented!();
-                }
-                sym::va_arg => {
-                    unimplemented!();
-                }
+        let llval = match name {
+            _ if simple.is_some() => {
+                // FIXME(antoyo): remove this cast when the API supports function.
+                let func = unsafe { std::mem::transmute(simple.expect("simple")) };
+                self.call(
+                    self.type_void(),
+                    None,
+                    None,
+                    func,
+                    &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
+                    None,
+                )
+            }
+            sym::likely => self.expect(args[0].immediate(), true),
+            sym::unlikely => self.expect(args[0].immediate(), false),
+            sym::is_val_statically_known => {
+                let a = args[0].immediate();
+                let builtin = self.context.get_builtin_function("__builtin_constant_p");
+                let res = self.context.new_call(None, builtin, &[a]);
+                self.icmp(IntPredicate::IntEQ, res, self.const_i32(0))
+            }
+            sym::catch_unwind => {
+                try_intrinsic(
+                    self,
+                    args[0].immediate(),
+                    args[1].immediate(),
+                    args[2].immediate(),
+                    llresult,
+                );
+                return Ok(());
+            }
+            sym::breakpoint => {
+                unimplemented!();
+            }
+            sym::va_copy => {
+                unimplemented!();
+            }
+            sym::va_arg => {
+                unimplemented!();
+            }
 
-                sym::volatile_load | sym::unaligned_volatile_load => {
-                    let tp_ty = fn_args.type_at(0);
-                    let ptr = args[0].immediate();
-                    let load =
-                        if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
-                            let gcc_ty = ty.gcc_type(self);
-                            self.volatile_load(gcc_ty, ptr)
+            sym::volatile_load | sym::unaligned_volatile_load => {
+                let tp_ty = fn_args.type_at(0);
+                let ptr = args[0].immediate();
+                let load = if let PassMode::Cast { cast: ty, pad_i32: _ } = &fn_abi.ret.mode {
+                    let gcc_ty = ty.gcc_type(self);
+                    self.volatile_load(gcc_ty, ptr)
+                } else {
+                    self.volatile_load(self.layout_of(tp_ty).gcc_type(self), ptr)
+                };
+                // TODO(antoyo): set alignment.
+                self.to_immediate(load, self.layout_of(tp_ty))
+            }
+            sym::volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::unaligned_volatile_store => {
+                let dst = args[0].deref(self.cx());
+                args[1].val.unaligned_volatile_store(self, dst);
+                return Ok(());
+            }
+            sym::prefetch_read_data
+            | sym::prefetch_write_data
+            | sym::prefetch_read_instruction
+            | sym::prefetch_write_instruction => {
+                unimplemented!();
+            }
+            sym::ctlz
+            | sym::ctlz_nonzero
+            | sym::cttz
+            | sym::cttz_nonzero
+            | sym::ctpop
+            | sym::bswap
+            | sym::bitreverse
+            | sym::rotate_left
+            | sym::rotate_right
+            | sym::saturating_add
+            | sym::saturating_sub => {
+                let ty = arg_tys[0];
+                match int_type_width_signed(ty, self) {
+                    Some((width, signed)) => match name {
+                        sym::ctlz | sym::cttz => {
+                            let func = self.current_func.borrow().expect("func");
+                            let then_block = func.new_block("then");
+                            let else_block = func.new_block("else");
+                            let after_block = func.new_block("after");
+
+                            let arg = args[0].immediate();
+                            let result = func.new_local(None, arg.get_type(), "zeros");
+                            let zero = self.cx.gcc_zero(arg.get_type());
+                            let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
+                            self.llbb().end_with_conditional(None, cond, then_block, else_block);
+
+                            let zero_result = self.cx.gcc_uint(arg.get_type(), width);
+                            then_block.add_assignment(None, result, zero_result);
+                            then_block.end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place
+                            // count_leading_zeroes() does not expect, the current block
+                            // in the state need to be updated.
+                            self.switch_to_block(else_block);
+
+                            let zeros = match name {
+                                sym::ctlz => self.count_leading_zeroes(width, arg),
+                                sym::cttz => self.count_trailing_zeroes(width, arg),
+                                _ => unreachable!(),
+                            };
+                            self.llbb().add_assignment(None, result, zeros);
+                            self.llbb().end_with_jump(None, after_block);
+
+                            // NOTE: since jumps were added in a place rustc does not
+                            // expect, the current block in the state need to be updated.
+                            self.switch_to_block(after_block);
+
+                            result.to_rvalue()
                         }
-                        else {
-                            self.volatile_load(self.layout_of(tp_ty).gcc_type(self), ptr)
-                        };
-                    // TODO(antoyo): set alignment.
-                    self.to_immediate(load, self.layout_of(tp_ty))
-                }
-                sym::volatile_store => {
-                    let dst = args[0].deref(self.cx());
-                    args[1].val.volatile_store(self, dst);
-                    return Ok(());
-                }
-                sym::unaligned_volatile_store => {
-                    let dst = args[0].deref(self.cx());
-                    args[1].val.unaligned_volatile_store(self, dst);
-                    return Ok(());
-                }
-                sym::prefetch_read_data
-                    | sym::prefetch_write_data
-                    | sym::prefetch_read_instruction
-                    | sym::prefetch_write_instruction => {
-                        unimplemented!();
-                    }
-                sym::ctlz
-                    | sym::ctlz_nonzero
-                    | sym::cttz
-                    | sym::cttz_nonzero
-                    | sym::ctpop
-                    | sym::bswap
-                    | sym::bitreverse
-                    | sym::rotate_left
-                    | sym::rotate_right
-                    | sym::saturating_add
-                    | sym::saturating_sub => {
-                        let ty = arg_tys[0];
-                        match int_type_width_signed(ty, self) {
-                            Some((width, signed)) => match name {
-                                sym::ctlz | sym::cttz => {
-                                    let func = self.current_func.borrow().expect("func");
-                                    let then_block = func.new_block("then");
-                                    let else_block = func.new_block("else");
-                                    let after_block = func.new_block("after");
-
-                                    let arg = args[0].immediate();
-                                    let result = func.new_local(None, arg.get_type(), "zeros");
-                                    let zero = self.cx.gcc_zero(arg.get_type());
-                                    let cond = self.gcc_icmp(IntPredicate::IntEQ, arg, zero);
-                                    self.llbb().end_with_conditional(None, cond, then_block, else_block);
-
-                                    let zero_result = self.cx.gcc_uint(arg.get_type(), width);
-                                    then_block.add_assignment(None, result, zero_result);
-                                    then_block.end_with_jump(None, after_block);
-
-                                    // NOTE: since jumps were added in a place
-                                    // count_leading_zeroes() does not expect, the current block
-                                    // in the state need to be updated.
-                                    self.switch_to_block(else_block);
-
-                                    let zeros =
-                                        match name {
-                                            sym::ctlz => self.count_leading_zeroes(width, arg),
-                                            sym::cttz => self.count_trailing_zeroes(width, arg),
-                                            _ => unreachable!(),
-                                        };
-                                    self.llbb().add_assignment(None, result, zeros);
-                                    self.llbb().end_with_jump(None, after_block);
-
-                                    // NOTE: since jumps were added in a place rustc does not
-                                    // expect, the current block in the state need to be updated.
-                                    self.switch_to_block(after_block);
-
-                                    result.to_rvalue()
-                                }
-                                sym::ctlz_nonzero => {
-                                    self.count_leading_zeroes(width, args[0].immediate())
-                                },
-                                sym::cttz_nonzero => {
-                                    self.count_trailing_zeroes(width, args[0].immediate())
-                                }
-                                sym::ctpop => self.pop_count(args[0].immediate()),
-                                sym::bswap => {
-                                    if width == 8 {
-                                        args[0].immediate() // byte swap a u8/i8 is just a no-op
-                                    }
-                                    else {
-                                        self.gcc_bswap(args[0].immediate(), width)
-                                    }
-                                },
-                                sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
-                                sym::rotate_left | sym::rotate_right => {
-                                    // TODO(antoyo): implement using algorithm from:
-                                    // https://blog.regehr.org/archives/1063
-                                    // for other platforms.
-                                    let is_left = name == sym::rotate_left;
-                                    let val = args[0].immediate();
-                                    let raw_shift = args[1].immediate();
-                                    if is_left {
-                                        self.rotate_left(val, raw_shift, width)
-                                    }
-                                    else {
-                                        self.rotate_right(val, raw_shift, width)
-                                    }
-                                },
-                                sym::saturating_add => {
-                                    self.saturating_add(args[0].immediate(), args[1].immediate(), signed, width)
-                                },
-                                sym::saturating_sub => {
-                                    self.saturating_sub(args[0].immediate(), args[1].immediate(), signed, width)
-                                },
-                                _ => bug!(),
-                            },
-                            None => {
-                                tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, ty });
-                                return Ok(());
+                        sym::ctlz_nonzero => self.count_leading_zeroes(width, args[0].immediate()),
+                        sym::cttz_nonzero => self.count_trailing_zeroes(width, args[0].immediate()),
+                        sym::ctpop => self.pop_count(args[0].immediate()),
+                        sym::bswap => {
+                            if width == 8 {
+                                args[0].immediate() // byte swap a u8/i8 is just a no-op
+                            } else {
+                                self.gcc_bswap(args[0].immediate(), width)
                             }
                         }
-                    }
-
-                sym::raw_eq => {
-                    use rustc_target::abi::Abi::*;
-                    let tp_ty = fn_args.type_at(0);
-                    let layout = self.layout_of(tp_ty).layout;
-                    let _use_integer_compare = match layout.abi() {
-                        Scalar(_) | ScalarPair(_, _) => true,
-                        Uninhabited | Vector { .. } => false,
-                        Aggregate { .. } => {
-                            // For rusty ABIs, small aggregates are actually passed
-                            // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
-                            // so we re-use that same threshold here.
-                            layout.size() <= self.data_layout().pointer_size * 2
+                        sym::bitreverse => self.bit_reverse(width, args[0].immediate()),
+                        sym::rotate_left | sym::rotate_right => {
+                            // TODO(antoyo): implement using algorithm from:
+                            // https://blog.regehr.org/archives/1063
+                            // for other platforms.
+                            let is_left = name == sym::rotate_left;
+                            let val = args[0].immediate();
+                            let raw_shift = args[1].immediate();
+                            if is_left {
+                                self.rotate_left(val, raw_shift, width)
+                            } else {
+                                self.rotate_right(val, raw_shift, width)
+                            }
                         }
-                    };
-
-                    let a = args[0].immediate();
-                    let b = args[1].immediate();
-                    if layout.size().bytes() == 0 {
-                        self.const_bool(true)
-                    }
-                    /*else if use_integer_compare {
-                        let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
-                        let ptr_ty = self.type_ptr_to(integer_ty);
-                        let a_ptr = self.bitcast(a, ptr_ty);
-                        let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
-                        let b_ptr = self.bitcast(b, ptr_ty);
-                        let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
-                        self.icmp(IntPredicate::IntEQ, a_val, b_val)
-                    }*/
-                    else {
-                        let void_ptr_type = self.context.new_type::<*const ()>();
-                        let a_ptr = self.bitcast(a, void_ptr_type);
-                        let b_ptr = self.bitcast(b, void_ptr_type);
-                        let n = self.context.new_cast(None, self.const_usize(layout.size().bytes()), self.sizet_type);
-                        let builtin = self.context.get_builtin_function("memcmp");
-                        let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
-                        self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
+                        sym::saturating_add => self.saturating_add(
+                            args[0].immediate(),
+                            args[1].immediate(),
+                            signed,
+                            width,
+                        ),
+                        sym::saturating_sub => self.saturating_sub(
+                            args[0].immediate(),
+                            args[1].immediate(),
+                            signed,
+                            width,
+                        ),
+                        _ => bug!(),
+                    },
+                    None => {
+                        tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
+                            span,
+                            name,
+                            ty,
+                        });
+                        return Ok(());
                     }
                 }
+            }
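The ctlz/cttz arm above builds explicit then/else/after blocks because GCC's __builtin_clz/__builtin_ctz family is undefined for a zero argument, while the Rust intrinsics must return the bit width for zero. A minimal stand-alone sketch of that contract (the function name and the u32 width are ours, for illustration only):

fn ctlz_u32(x: u32) -> u32 {
    if x == 0 {
        // The "then" block above assigns `width` to the result and jumps to "after".
        32
    } else {
        // The "else" block falls through to count_leading_zeroes(), which wraps the builtin.
        x.leading_zeros()
    }
}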
 
-                sym::compare_bytes => {
-                    let a = args[0].immediate();
-                    let b = args[1].immediate();
-                    let n = args[2].immediate();
+            sym::raw_eq => {
+                use rustc_target::abi::Abi::*;
+                let tp_ty = fn_args.type_at(0);
+                let layout = self.layout_of(tp_ty).layout;
+                let _use_integer_compare = match layout.abi() {
+                    Scalar(_) | ScalarPair(_, _) => true,
+                    Uninhabited | Vector { .. } => false,
+                    Aggregate { .. } => {
+                        // For rusty ABIs, small aggregates are actually passed
+                        // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
+                        // so we re-use that same threshold here.
+                        layout.size() <= self.data_layout().pointer_size * 2
+                    }
+                };
 
+                let a = args[0].immediate();
+                let b = args[1].immediate();
+                if layout.size().bytes() == 0 {
+                    self.const_bool(true)
+                }
+                /*else if use_integer_compare {
+                    let integer_ty = self.type_ix(layout.size.bits()); // FIXME(antoyo): LLVM creates an integer of 96 bits for [i32; 3], but gcc doesn't support this, so it creates an integer of 128 bits.
+                    let ptr_ty = self.type_ptr_to(integer_ty);
+                    let a_ptr = self.bitcast(a, ptr_ty);
+                    let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
+                    let b_ptr = self.bitcast(b, ptr_ty);
+                    let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
+                    self.icmp(IntPredicate::IntEQ, a_val, b_val)
+                }*/
+                else {
                     let void_ptr_type = self.context.new_type::<*const ()>();
                     let a_ptr = self.bitcast(a, void_ptr_type);
                     let b_ptr = self.bitcast(b, void_ptr_type);
-
-                    // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                    let n = self.context.new_cast(
+                        None,
+                        self.const_usize(layout.size().bytes()),
+                        self.sizet_type,
+                    );
                     let builtin = self.context.get_builtin_function("memcmp");
                     let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
-                    self.sext(cmp, self.type_ix(32))
+                    self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0))
                 }
+            }
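The raw_eq arm above lowers to a memcmp call compared against zero, with zero-sized layouts short-circuiting to true; the integer-compare fast path stays commented out. A hedged plain-Rust sketch of that contract, ignoring padding and pointer-provenance concerns (the function name is ours):

fn raw_eq_via_memcmp<T>(a: &T, b: &T) -> bool {
    let size = std::mem::size_of::<T>();
    if size == 0 {
        return true; // mirrors the `const_bool(true)` path for zero-sized layouts
    }
    let a_bytes = unsafe { std::slice::from_raw_parts(a as *const T as *const u8, size) };
    let b_bytes = unsafe { std::slice::from_raw_parts(b as *const T as *const u8, size) };
    a_bytes == b_bytes // `memcmp(a, b, n) == 0` in the generated code
}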
 
-                sym::black_box => {
-                    args[0].val.store(self, result);
+            sym::compare_bytes => {
+                let a = args[0].immediate();
+                let b = args[1].immediate();
+                let n = args[2].immediate();
 
-                    let block = self.llbb();
-                    let extended_asm = block.add_extended_asm(None, "");
-                    extended_asm.add_input_operand(None, "r", result.llval);
-                    extended_asm.add_clobber("memory");
-                    extended_asm.set_volatile_flag(true);
+                let void_ptr_type = self.context.new_type::<*const ()>();
+                let a_ptr = self.bitcast(a, void_ptr_type);
+                let b_ptr = self.bitcast(b, void_ptr_type);
 
-                    // We have copied the value to `result` already.
-                    return Ok(());
-                }
+                // Here we assume that the `memcmp` provided by the target is a NOP for size 0.
+                let builtin = self.context.get_builtin_function("memcmp");
+                let cmp = self.context.new_call(None, builtin, &[a_ptr, b_ptr, n]);
+                self.sext(cmp, self.type_ix(32))
+            }
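The compare_bytes arm maps straight onto the target's memcmp and sign-extends the result to a 32-bit integer; as the comment notes, it relies on memcmp being a no-op for length zero. A rough sketch assuming a libc-style memcmp is available (the declaration below is ours):

fn compare_bytes(a: *const u8, b: *const u8, n: usize) -> i32 {
    extern "C" {
        fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32;
    }
    // As in the lowering above, n == 0 is assumed to be harmless for memcmp.
    unsafe { memcmp(a, b, n) }
}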
 
-                sym::ptr_mask => {
-                    let usize_type = self.context.new_type::<usize>();
-                    let void_ptr_type = self.context.new_type::<*const ()>();
+            sym::black_box => {
+                args[0].val.store(self, result);
 
-                    let ptr = args[0].immediate();
-                    let mask = args[1].immediate();
+                let block = self.llbb();
+                let extended_asm = block.add_extended_asm(None, "");
+                extended_asm.add_input_operand(None, "r", result.llval);
+                extended_asm.add_clobber("memory");
+                extended_asm.set_volatile_flag(true);
 
-                    let addr = self.bitcast(ptr, usize_type);
-                    let masked = self.and(addr, mask);
-                    self.bitcast(masked, void_ptr_type)
-                },
+                // We have copied the value to `result` already.
+                return Ok(());
+            }
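black_box is implemented above as an empty volatile extended-asm statement that takes the stored result as an input operand and clobbers memory, so the optimizer must assume the value is used. A loose Rust-level analogue of that barrier trick, on targets where `asm!` is supported (this is not the backend's actual code path):

use std::arch::asm;

fn opaque_use(value: &u64) {
    // An empty asm block with the pointer as an input and no `nomem` option acts as an
    // optimization barrier, much like add_extended_asm("") plus add_clobber("memory").
    unsafe { asm!("", in(reg) value as *const u64, options(nostack, preserves_flags)) };
}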
 
-                _ if name_str.starts_with("simd_") => {
-                    match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
-                        Ok(llval) => llval,
-                        Err(()) => return Ok(()),
-                    }
+            sym::ptr_mask => {
+                let usize_type = self.context.new_type::<usize>();
+                let void_ptr_type = self.context.new_type::<*const ()>();
+
+                let ptr = args[0].immediate();
+                let mask = args[1].immediate();
+
+                let addr = self.bitcast(ptr, usize_type);
+                let masked = self.and(addr, mask);
+                self.bitcast(masked, void_ptr_type)
+            }
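ptr_mask above is a pure bit operation: the pointer is bitcast to a usize, ANDed with the mask, and cast back to a pointer. A minimal sketch, ignoring strict-provenance considerations:

fn ptr_mask(ptr: *const u8, mask: usize) -> *const u8 {
    ((ptr as usize) & mask) as *const u8
}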
+
+            _ if name_str.starts_with("simd_") => {
+                match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) {
+                    Ok(llval) => llval,
+                    Err(()) => return Ok(()),
                 }
+            }
 
-                // Fall back to default body
-                _ => return Err(Instance::new(instance.def_id(), instance.args)),
-            };
+            // Fall back to default body
+            _ => return Err(Instance::new(instance.def_id(), instance.args)),
+        };
 
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                 let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                 let ptr = self.pointercast(result.llval, ptr_llty);
                 self.store(llval, ptr, result.align);
-            }
-            else {
+            } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
                     .store(self, result);
@@ -423,11 +441,21 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 }
 
 impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn store_fn_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>) {
+    fn store_fn_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, Self::Value>,
+    ) {
         arg_abi.store_fn_arg(self, idx, dst)
     }
 
-    fn store_arg(&mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn store_arg(
+        &mut self,
+        arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         arg_abi.store(self, val, dst)
     }
 
@@ -438,8 +466,18 @@ impl<'a, 'gcc, 'tcx> ArgAbiMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
 
 pub trait ArgAbiExt<'gcc, 'tcx> {
     fn memory_ty(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
-    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>);
-    fn store_fn_arg(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>);
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    );
+    fn store_fn_arg(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    );
 }
 
 impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
@@ -453,17 +491,20 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
     /// place for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    fn store(&self, bx: &mut Builder<'_, 'gcc, 'tcx>, val: RValue<'gcc>, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn store(
+        &self,
+        bx: &mut Builder<'_, 'gcc, 'tcx>,
+        val: RValue<'gcc>,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         if self.is_ignore() {
             return;
         }
         if self.is_sized_indirect() {
             OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
-        }
-        else if self.is_unsized_indirect() {
+        } else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
-        }
-        else if let PassMode::Cast { ref cast, .. } = self.mode {
+        } else if let PassMode::Cast { ref cast, .. } = self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -471,8 +512,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 let cast_ptr_llty = bx.type_ptr_to(cast.gcc_type(bx));
                 let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
                 bx.store(val, cast_dst, self.layout.align.abi);
-            }
-            else {
+            } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type.  The
                 // code that follows is the only reliable way I have
@@ -508,35 +548,44 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
 
                 bx.lifetime_end(llscratch, scratch_size);
             }
-        }
-        else {
+        } else {
             OperandValue::Immediate(val).store(bx, dst);
         }
     }
 
-    fn store_fn_arg<'a>(&self, bx: &mut Builder<'a, 'gcc, 'tcx>, idx: &mut usize, dst: PlaceRef<'tcx, RValue<'gcc>>) {
+    fn store_fn_arg<'a>(
+        &self,
+        bx: &mut Builder<'a, 'gcc, 'tcx>,
+        idx: &mut usize,
+        dst: PlaceRef<'tcx, RValue<'gcc>>,
+    ) {
         let mut next = || {
             let val = bx.current_func().get_param(*idx as i32);
             *idx += 1;
             val.to_rvalue()
         };
         match self.mode {
-            PassMode::Ignore => {},
+            PassMode::Ignore => {}
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
-            },
+            }
             PassMode::Indirect { meta_attrs: Some(_), .. } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
-            },
-            PassMode::Direct(_) | PassMode::Indirect { meta_attrs: None, .. } | PassMode::Cast { .. } => {
+            }
+            PassMode::Direct(_)
+            | PassMode::Indirect { meta_attrs: None, .. }
+            | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
-            },
+            }
         }
     }
 }
 
-fn int_type_width_signed<'gcc, 'tcx>(ty: Ty<'tcx>, cx: &CodegenCx<'gcc, 'tcx>) -> Option<(u64, bool)> {
+fn int_type_width_signed<'gcc, 'tcx>(
+    ty: Ty<'tcx>,
+    cx: &CodegenCx<'gcc, 'tcx>,
+) -> Option<(u64, bool)> {
     match ty.kind() {
         ty::Int(t) => Some((
             match t {
@@ -570,82 +619,76 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let typ = result_type.to_unsigned(self.cx);
 
         let value =
-            if result_type.is_signed(self.cx) {
-                self.gcc_int_cast(value, typ)
-            }
-            else {
-                value
-            };
+            if result_type.is_signed(self.cx) { self.gcc_int_cast(value, typ) } else { value };
 
         let context = &self.cx.context;
-        let result =
-            match width {
-                8 | 16 | 32 | 64 => {
-                    let mask = ((1u128 << width) - 1) as u64;
-                    let (m0, m1, m2) = if width > 16 {
-                        (
-                            context.new_rvalue_from_long(typ, (0x5555555555555555u64 & mask) as i64),
-                            context.new_rvalue_from_long(typ, (0x3333333333333333u64 & mask) as i64),
-                            context.new_rvalue_from_long(typ, (0x0f0f0f0f0f0f0f0fu64 & mask) as i64),
-                        )
-                    } else {
-                        (
-                            context.new_rvalue_from_int(typ, (0x5555u64 & mask) as i32),
-                            context.new_rvalue_from_int(typ, (0x3333u64 & mask) as i32),
-                            context.new_rvalue_from_int(typ, (0x0f0fu64 & mask) as i32),
-                        )
-                    };
-                    let one = context.new_rvalue_from_int(typ, 1);
-                    let two = context.new_rvalue_from_int(typ, 2);
-                    let four = context.new_rvalue_from_int(typ, 4);
-
-                    // First step.
-                    let left = self.lshr(value, one);
-                    let left = self.and(left, m0);
-                    let right = self.and(value, m0);
-                    let right = self.shl(right, one);
-                    let step1 = self.or(left, right);
-
-                    // Second step.
-                    let left = self.lshr(step1, two);
-                    let left = self.and(left, m1);
-                    let right = self.and(step1, m1);
-                    let right = self.shl(right, two);
-                    let step2 = self.or(left, right);
-
-                    // Third step.
-                    let left = self.lshr(step2, four);
-                    let left = self.and(left, m2);
-                    let right = self.and(step2, m2);
-                    let right = self.shl(right, four);
-                    let step3 = self.or(left, right);
-
-                    // Fourth step.
-                    if width == 8 {
-                        step3
-                    } else {
-                        self.gcc_bswap(step3, width)
-                    }
-                },
-                128 => {
-                    // TODO(antoyo): find a more efficient implementation?
-                    let sixty_four = self.gcc_int(typ, 64);
-                    let right_shift = self.gcc_lshr(value, sixty_four);
-                    let high = self.gcc_int_cast(right_shift, self.u64_type);
-                    let low = self.gcc_int_cast(value, self.u64_type);
-
-                    let reversed_high = self.bit_reverse(64, high);
-                    let reversed_low = self.bit_reverse(64, low);
-
-                    let new_low = self.gcc_int_cast(reversed_high, typ);
-                    let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
-
-                    self.gcc_or(new_low, new_high)
-                },
-                _ => {
-                    panic!("cannot bit reverse with width = {}", width);
-                },
-            };
+        let result = match width {
+            8 | 16 | 32 | 64 => {
+                let mask = ((1u128 << width) - 1) as u64;
+                let (m0, m1, m2) = if width > 16 {
+                    (
+                        context.new_rvalue_from_long(typ, (0x5555555555555555u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x3333333333333333u64 & mask) as i64),
+                        context.new_rvalue_from_long(typ, (0x0f0f0f0f0f0f0f0fu64 & mask) as i64),
+                    )
+                } else {
+                    (
+                        context.new_rvalue_from_int(typ, (0x5555u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x3333u64 & mask) as i32),
+                        context.new_rvalue_from_int(typ, (0x0f0fu64 & mask) as i32),
+                    )
+                };
+                let one = context.new_rvalue_from_int(typ, 1);
+                let two = context.new_rvalue_from_int(typ, 2);
+                let four = context.new_rvalue_from_int(typ, 4);
+
+                // First step.
+                let left = self.lshr(value, one);
+                let left = self.and(left, m0);
+                let right = self.and(value, m0);
+                let right = self.shl(right, one);
+                let step1 = self.or(left, right);
+
+                // Second step.
+                let left = self.lshr(step1, two);
+                let left = self.and(left, m1);
+                let right = self.and(step1, m1);
+                let right = self.shl(right, two);
+                let step2 = self.or(left, right);
+
+                // Third step.
+                let left = self.lshr(step2, four);
+                let left = self.and(left, m2);
+                let right = self.and(step2, m2);
+                let right = self.shl(right, four);
+                let step3 = self.or(left, right);
+
+                // Fourth step.
+                if width == 8 {
+                    step3
+                } else {
+                    self.gcc_bswap(step3, width)
+                }
+            }
+            128 => {
+                // TODO(antoyo): find a more efficient implementation?
+                let sixty_four = self.gcc_int(typ, 64);
+                let right_shift = self.gcc_lshr(value, sixty_four);
+                let high = self.gcc_int_cast(right_shift, self.u64_type);
+                let low = self.gcc_int_cast(value, self.u64_type);
+
+                let reversed_high = self.bit_reverse(64, high);
+                let reversed_low = self.bit_reverse(64, low);
+
+                let new_low = self.gcc_int_cast(reversed_high, typ);
+                let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
+
+                self.gcc_or(new_low, new_high, self.location)
+            }
+            _ => {
+                panic!("cannot bit reverse with width = {}", width);
+            }
+        };
 
         self.gcc_int_cast(result, result_type)
     }
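For widths up to 64, bit_reverse above uses the classic mask-and-shift swaps (adjacent bits, 2-bit pairs, nibbles) followed by a byte swap; width 8 skips the byte swap and width 128 reverses each 64-bit half separately. A compact u32 sketch of the same steps:

fn bit_reverse_u32(value: u32) -> u32 {
    let step1 = ((value >> 1) & 0x5555_5555) | ((value & 0x5555_5555) << 1); // swap adjacent bits
    let step2 = ((step1 >> 2) & 0x3333_3333) | ((step1 & 0x3333_3333) << 2); // swap 2-bit pairs
    let step3 = ((step2 >> 4) & 0x0f0f_0f0f) | ((step2 & 0x0f0f_0f0f) << 4); // swap nibbles
    step3.swap_bytes() // the final gcc_bswap; the width == 8 arm returns step3 directly
}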
@@ -685,56 +728,54 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 let first_elem = self.context.new_array_access(None, result, zero);
                 let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
                 self.llbb()
-                    .add_assignment(None, first_elem, first_value);
+                    .add_assignment(self.location, first_elem, first_value);
 
-                let second_elem = self.context.new_array_access(None, result, one);
-                let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
+                let second_elem = self.context.new_array_access(self.location, result, one);
+                let cast = self.gcc_int_cast(self.context.new_call(self.location, clzll, &[low]), arg_type);
                 let second_value = self.add(cast, sixty_four);
                 self.llbb()
-                    .add_assignment(None, second_elem, second_value);
+                    .add_assignment(self.location, second_elem, second_value);
 
-                let third_elem = self.context.new_array_access(None, result, two);
+                let third_elem = self.context.new_array_access(self.location, result, two);
                 let third_value = self.const_uint(arg_type, 128);
                 self.llbb()
-                    .add_assignment(None, third_elem, third_value);
+                    .add_assignment(self.location, third_elem, third_value);
 
-                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
-                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, low);
                 let not_low_and_not_high = not_low & not_high;
                 let index = not_high + not_low_and_not_high;
                 // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
                 // gcc.
                 // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
                 // compilation stage.
-                let index = self.context.new_cast(None, index, self.i32_type);
+                let index = self.context.new_cast(self.location, index, self.i32_type);
 
-                let res = self.context.new_array_access(None, result, index);
+                let res = self.context.new_array_access(self.location, result, index);
 
                 return self.gcc_int_cast(res.to_rvalue(), arg_type);
             }
             else {
                 let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
-                let arg = self.context.new_cast(None, arg, self.ulonglong_type);
+                let arg = self.context.new_cast(self.location, arg, self.ulonglong_type);
                 let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
                 let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
-                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
-                return self.context.new_cast(None, res, arg_type);
+                let res = self.context.new_call(self.location, count_leading_zeroes, &[arg]) - diff;
+                return self.context.new_cast(self.location, res, arg_type);
             };
         let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
-        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
-        self.context.new_cast(None, res, arg_type)
+        let res = self.context.new_call(self.location, count_leading_zeroes, &[arg]);
+        self.context.new_cast(self.location, res, arg_type)
     }
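The 128-bit branch of count_leading_zeroes above avoids control flow by storing the three candidate answers in a small array and computing the index from whether each half is zero (LogicalNegate yields 0 or 1). A hedged sketch of that selection logic:

fn clz_u128(value: u128) -> u32 {
    let high = (value >> 64) as u64;
    let low = value as u64;
    // results[0]: high half non-zero; results[1]: only the low half non-zero; results[2]: all zero.
    let results = [high.leading_zeros(), low.leading_zeros() + 64, 128];
    let not_high = (high == 0) as usize;
    let not_low = (low == 0) as usize;
    let index = not_high + (not_low & not_high); // mirrors `not_high + not_low_and_not_high`
    results[index]
}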
 
     fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
         let result_type = arg.get_type();
-        let arg =
-            if result_type.is_signed(self.cx) {
-                let new_type = result_type.to_unsigned(self.cx);
-                self.gcc_int_cast(arg, new_type)
-            }
-            else {
-                arg
-            };
+        let arg = if result_type.is_signed(self.cx) {
+            let new_type = result_type.to_unsigned(self.cx);
+            self.gcc_int_cast(arg, new_type)
+        } else {
+            arg
+        };
         let arg_type = arg.get_type();
         let (count_trailing_zeroes, expected_type) =
             // TODO(antoyo): write a new function Type::is_compatible_with(&Type) and use it here
@@ -766,58 +807,56 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
                 let ctzll = self.context.get_builtin_function("__builtin_ctzll");
 
-                let first_elem = self.context.new_array_access(None, result, zero);
-                let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
+                let first_elem = self.context.new_array_access(self.location, result, zero);
+                let first_value = self.gcc_int_cast(self.context.new_call(self.location, ctzll, &[low]), arg_type);
                 self.llbb()
-                    .add_assignment(None, first_elem, first_value);
+                    .add_assignment(self.location, first_elem, first_value);
 
-                let second_elem = self.context.new_array_access(None, result, one);
-                let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
+                let second_elem = self.context.new_array_access(self.location, result, one);
+                let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(self.location, ctzll, &[high]), arg_type), sixty_four);
                 self.llbb()
-                    .add_assignment(None, second_elem, second_value);
+                    .add_assignment(self.location, second_elem, second_value);
 
-                let third_elem = self.context.new_array_access(None, result, two);
+                let third_elem = self.context.new_array_access(self.location, result, two);
                 let third_value = self.gcc_int(arg_type, 128);
                 self.llbb()
-                    .add_assignment(None, third_elem, third_value);
+                    .add_assignment(self.location, third_elem, third_value);
 
-                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
-                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(self.location, UnaryOp::LogicalNegate, self.u64_type, high);
                 let not_low_and_not_high = not_low & not_high;
                 let index = not_low + not_low_and_not_high;
                 // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
                 // gcc.
                 // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
                 // compilation stage.
-                let index = self.context.new_cast(None, index, self.i32_type);
+                let index = self.context.new_cast(self.location, index, self.i32_type);
 
-                let res = self.context.new_array_access(None, result, index);
+                let res = self.context.new_array_access(self.location, result, index);
 
                 return self.gcc_int_cast(res.to_rvalue(), result_type);
             }
             else {
                 let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
                 let arg_size = arg_type.get_size();
-                let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
+                let casted_arg = self.context.new_cast(self.location, arg, self.ulonglong_type);
                 let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
                 let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
                 let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
-                let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
-                let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
-                let diff = diff * self.context.new_cast(None, cond, self.int_type);
-                let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
-                return self.context.new_cast(None, res, result_type);
+                let masked = mask & self.context.new_unary_op(self.location, UnaryOp::BitwiseNegate, arg_type, arg);
+                let cond = self.context.new_comparison(self.location, ComparisonOp::Equals, masked, mask);
+                let diff = diff * self.context.new_cast(self.location, cond, self.int_type);
+                let res = self.context.new_call(self.location, count_trailing_zeroes, &[casted_arg]) - diff;
+                return self.context.new_cast(self.location, res, result_type);
             };
         let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
-        let arg =
-            if arg_type != expected_type {
-                self.context.new_cast(None, arg, expected_type)
-            }
-            else {
-                arg
-            };
-        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
-        self.context.new_cast(None, res, result_type)
+        let arg = if arg_type != expected_type {
+            self.context.new_cast(self.location, arg, expected_type)
+        } else {
+            arg
+        };
+        let res = self.context.new_call(self.location, count_trailing_zeroes, &[arg]);
+        self.context.new_cast(self.location, res, result_type)
     }
 
     fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
@@ -825,13 +864,11 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let result_type = value.get_type();
         let value_type = result_type.to_unsigned(self.cx);
 
-        let value =
-            if result_type.is_signed(self.cx) {
-                self.gcc_int_cast(value, value_type)
-            }
-            else {
-                value
-            };
+        let value = if result_type.is_signed(self.cx) {
+            self.gcc_int_cast(value, value_type)
+        } else {
+            value
+        };
 
         // only break apart 128-bit ints if they're not natively supported
         // TODO(antoyo): remove this if/when native 128-bit integers land in libgccjit
@@ -859,8 +896,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let counter = self.current_func().new_local(None, counter_type, "popcount_counter");
         let val = self.current_func().new_local(None, value_type, "popcount_value");
         let zero = self.gcc_zero(counter_type);
-        self.llbb().add_assignment(None, counter, zero);
-        self.llbb().add_assignment(None, val, value);
+        self.llbb().add_assignment(self.location, counter, zero);
+        self.llbb().add_assignment(self.location, val, value);
         self.br(loop_head);
 
         // check if value isn't zero
@@ -874,12 +911,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let one = self.gcc_int(value_type, 1);
         let sub = self.gcc_sub(val.to_rvalue(), one);
         let op = self.gcc_and(val.to_rvalue(), sub);
-        loop_body.add_assignment(None, val, op);
+        loop_body.add_assignment(self.location, val, op);
 
         // counter += 1
         let one = self.gcc_int(counter_type, 1);
         let op = self.gcc_add(counter.to_rvalue(), one);
-        loop_body.add_assignment(None, counter, op);
+        loop_body.add_assignment(self.location, counter, op);
         self.br(loop_head);
 
         // end of loop
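The popcount loop emitted above is Kernighan's method: clear the lowest set bit until the value reaches zero, counting iterations. An equivalent stand-alone sketch:

fn pop_count(mut val: u128) -> u32 {
    let mut counter = 0;
    while val != 0 {
        val &= val - 1; // `val & (val - 1)` clears the lowest set bit, as in the loop body above
        counter += 1;
    }
    counter
}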
@@ -888,66 +925,70 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
 
     // Algorithm from: https://blog.regehr.org/archives/1063
-    fn rotate_left(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+    fn rotate_left(
+        &mut self,
+        value: RValue<'gcc>,
+        shift: RValue<'gcc>,
+        width: u64,
+    ) -> RValue<'gcc> {
         let max = self.const_uint(shift.get_type(), width);
         let shift = self.urem(shift, max);
         let lhs = self.shl(value, shift);
         let result_neg = self.neg(shift);
-        let result_and =
-            self.and(
-                result_neg,
-                self.const_uint(shift.get_type(), width - 1),
-            );
+        let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1));
         let rhs = self.lshr(value, result_and);
         self.or(lhs, rhs)
     }
 
     // Algorithm from: https://blog.regehr.org/archives/1063
-    fn rotate_right(&mut self, value: RValue<'gcc>, shift: RValue<'gcc>, width: u64) -> RValue<'gcc> {
+    fn rotate_right(
+        &mut self,
+        value: RValue<'gcc>,
+        shift: RValue<'gcc>,
+        width: u64,
+    ) -> RValue<'gcc> {
         let max = self.const_uint(shift.get_type(), width);
         let shift = self.urem(shift, max);
         let lhs = self.lshr(value, shift);
         let result_neg = self.neg(shift);
-        let result_and =
-            self.and(
-                result_neg,
-                self.const_uint(shift.get_type(), width - 1),
-            );
+        let result_and = self.and(result_neg, self.const_uint(shift.get_type(), width - 1));
         let rhs = self.shl(value, result_and);
         self.or(lhs, rhs)
     }
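Both rotate helpers follow the branch-free pattern from the linked Regehr post: reduce the shift modulo the width, then mask the complementary shift with `width - 1` so a zero shift never becomes a full-width (undefined) shift. A u32 sketch of rotate_left:

fn rotate_left_u32(value: u32, shift: u32) -> u32 {
    let width = 32u32;
    let shift = shift % width; // urem(shift, max)
    let lhs = value << shift;
    let rhs = value >> (shift.wrapping_neg() & (width - 1)); // neg then and, as above
    lhs | rhs
}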
 
-    fn saturating_add(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+    fn saturating_add(
+        &mut self,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+        signed: bool,
+        width: u64,
+    ) -> RValue<'gcc> {
         let result_type = lhs.get_type();
         if signed {
             // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
             let func = self.current_func.borrow().expect("func");
-            let res = func.new_local(None, result_type, "saturating_sum");
+            let res = func.new_local(self.location, result_type, "saturating_sum");
             let supports_native_type = self.is_native_int_type(result_type);
-            let overflow =
-                if supports_native_type {
-                    let func_name =
-                        match width {
-                            8 => "__builtin_add_overflow",
-                            16 => "__builtin_add_overflow",
-                            32 => "__builtin_sadd_overflow",
-                            64 => "__builtin_saddll_overflow",
-                            128 => "__builtin_add_overflow",
-                            _ => unreachable!(),
-                        };
-                    let overflow_func = self.context.get_builtin_function(func_name);
-                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
-                }
-                else {
-                    let func_name =
-                        match width {
-                            128 => "__rust_i128_addo",
-                            _ => unreachable!(),
-                        };
-                    let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
-                    self.llbb().add_assignment(None, res, int_result);
-                    overflow
+            let overflow = if supports_native_type {
+                let func_name = match width {
+                    8 => "__builtin_add_overflow",
+                    16 => "__builtin_add_overflow",
+                    32 => "__builtin_sadd_overflow",
+                    64 => "__builtin_saddll_overflow",
+                    128 => "__builtin_add_overflow",
+                    _ => unreachable!(),
+                };
+                let overflow_func = self.context.get_builtin_function(func_name);
+                self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.location)], None)
+            } else {
+                let func_name = match width {
+                    128 => "__rust_i128_addo",
+                    _ => unreachable!(),
                 };
+                let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
+                self.llbb().add_assignment(self.location, res, int_result);
+                overflow
+            };
 
             let then_block = func.new_block("then");
             let after_block = func.new_block("after");
@@ -955,83 +996,97 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // Return `result_type`'s maximum or minimum value on overflow
             // NOTE: convert the type to unsigned to have an unsigned shift.
             let unsigned_type = result_type.to_unsigned(&self.cx);
-            let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+            let shifted = self.gcc_lshr(
+                self.gcc_int_cast(lhs, unsigned_type),
+                self.gcc_int(unsigned_type, width as i64 - 1),
+            );
             let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
             let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
-            then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(
+                self.location,
+                res,
+                self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type),
+            );
+            then_block.end_with_jump(self.location, after_block);
 
-            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+            self.llbb().end_with_conditional(self.location, overflow, then_block, after_block);
 
             // NOTE: since jumps were added in a place rustc does not
             // expect, the current block in the state needs to be updated.
             self.switch_to_block(after_block);
 
             res.to_rvalue()
-        }
-        else {
+        } else {
             // Algorithm from: http://locklessinc.com/articles/sat_arithmetic/
             let res = self.gcc_add(lhs, rhs);
             let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs);
             let value = self.gcc_neg(self.gcc_int_cast(cond, result_type));
-            self.gcc_or(res, value)
+            self.gcc_or(res, value, self.location)
         }
     }
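
A standalone sketch of the two strategies used by `saturating_add` above (hypothetical helpers, not part of this patch): the unsigned path is the locklessinc trick, where a wrapped result satisfies `res < lhs` and negating that boolean yields an all-ones mask, while the signed path clamps to the maximum or minimum depending only on the sign of `lhs`, mirroring the shift-plus-`int_max` computation in the overflow branch:

    fn sat_add_u32(lhs: u32, rhs: u32) -> u32 {
        let res = lhs.wrapping_add(rhs);
        // On wrap-around `res < lhs`, so the mask is all ones and the OR
        // saturates to u32::MAX; otherwise the mask is zero and res is kept.
        res | ((res < lhs) as u32).wrapping_neg()
    }

    fn sat_add_i32(lhs: i32, rhs: i32) -> i32 {
        match lhs.checked_add(rhs) {
            Some(res) => res,
            // (lhs >> 31) + i32::MAX is i32::MAX for non-negative lhs and
            // wraps around to the bit pattern of i32::MIN for negative lhs.
            None => (((lhs as u32) >> 31).wrapping_add(i32::MAX as u32)) as i32,
        }
    }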
 
     // Algorithm from: https://locklessinc.com/articles/sat_arithmetic/
-    fn saturating_sub(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>, signed: bool, width: u64) -> RValue<'gcc> {
+    fn saturating_sub(
+        &mut self,
+        lhs: RValue<'gcc>,
+        rhs: RValue<'gcc>,
+        signed: bool,
+        width: u64,
+    ) -> RValue<'gcc> {
         let result_type = lhs.get_type();
         if signed {
             // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
             let func = self.current_func.borrow().expect("func");
-            let res = func.new_local(None, result_type, "saturating_diff");
+            let res = func.new_local(self.location, result_type, "saturating_diff");
             let supports_native_type = self.is_native_int_type(result_type);
-            let overflow =
-                if supports_native_type {
-                    let func_name =
-                        match width {
-                            8 => "__builtin_sub_overflow",
-                            16 => "__builtin_sub_overflow",
-                            32 => "__builtin_ssub_overflow",
-                            64 => "__builtin_ssubll_overflow",
-                            128 => "__builtin_sub_overflow",
-                            _ => unreachable!(),
-                        };
-                    let overflow_func = self.context.get_builtin_function(func_name);
-                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
-                }
-                else {
-                    let func_name =
-                        match width {
-                            128 => "__rust_i128_subo",
-                            _ => unreachable!(),
-                        };
-                    let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
-                    self.llbb().add_assignment(None, res, int_result);
-                    overflow
+            let overflow = if supports_native_type {
+                let func_name = match width {
+                    8 => "__builtin_sub_overflow",
+                    16 => "__builtin_sub_overflow",
+                    32 => "__builtin_ssub_overflow",
+                    64 => "__builtin_ssubll_overflow",
+                    128 => "__builtin_sub_overflow",
+                    _ => unreachable!(),
                 };
+                let overflow_func = self.context.get_builtin_function(func_name);
+                self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.location)], None)
+            } else {
+                let func_name = match width {
+                    128 => "__rust_i128_subo",
+                    _ => unreachable!(),
+                };
+                let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
+                self.llbb().add_assignment(self.location, res, int_result);
+                overflow
+            };
 
             let then_block = func.new_block("then");
             let after_block = func.new_block("after");
 
             // Return `result_type`'s maximum or minimum value on overflow
             // NOTE: convert the type to unsigned to have an unsigned shift.
-            let unsigned_type = result_type.to_unsigned(&self.cx);
-            let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
+            let unsigned_type = result_type.to_unsigned(self.cx);
+            let shifted = self.gcc_lshr(
+                self.gcc_int_cast(lhs, unsigned_type),
+                self.gcc_int(unsigned_type, width as i64 - 1),
+            );
             let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
             let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
-            then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(
+                self.location,
+                res,
+                self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type),
+            );
+            then_block.end_with_jump(self.location, after_block);
 
-            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+            self.llbb().end_with_conditional(self.location, overflow, then_block, after_block);
 
             // NOTE: since jumps were added in a place rustc does not
             // expect, the current block in the state needs to be updated.
             self.switch_to_block(after_block);
 
             res.to_rvalue()
-        }
-        else {
+        } else {
             let res = self.gcc_sub(lhs, rhs);
             let comparison = self.gcc_icmp(IntPredicate::IntULE, res, lhs);
             let value = self.gcc_neg(self.gcc_int_cast(comparison, result_type));
@@ -1040,21 +1095,26 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
 }
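
The unsigned branch of `saturating_sub` relies on the companion trick from the same article, mirroring the comparison-and-mask sequence just above. A minimal sketch (hypothetical helper, not part of this patch): when the subtraction does not wrap, `res <= lhs` holds and the mask is all ones, while a wrapped result is masked down to zero:

    fn sat_sub_u32(lhs: u32, rhs: u32) -> u32 {
        let res = lhs.wrapping_sub(rhs);
        res & ((res <= lhs) as u32).wrapping_neg()
    }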
 
-fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_func: RValue<'gcc>, data: RValue<'gcc>, _catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(
+    bx: &'b mut Builder<'a, 'gcc, 'tcx>,
+    try_func: RValue<'gcc>,
+    data: RValue<'gcc>,
+    _catch_func: RValue<'gcc>,
+    dest: RValue<'gcc>,
+) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
         bx.call(bx.type_void(), None, None, try_func, &[data], None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
         let ret_align = bx.tcx.data_layout.i32_align.abi;
         bx.store(bx.const_i32(0), dest, ret_align);
-    }
-    else if wants_msvc_seh(bx.sess()) {
-        unimplemented!();
-    }
-    else {
-        #[cfg(feature="master")]
+    } else {
+        if wants_msvc_seh(bx.sess()) {
+            unimplemented!();
+        }
+        #[cfg(feature = "master")]
         codegen_gnu_try(bx, try_func, data, _catch_func, dest);
-        #[cfg(not(feature="master"))]
+        #[cfg(not(feature = "master"))]
         unimplemented!();
     }
 }
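
For the unwinding case handled below, a user-level analogy of what the generated `__rust_try` shim provides (a sketch built on `std::panic`, not the actual codegen; the 0/1 return values correspond to the result stored into `dest`):

    use std::panic::{self, AssertUnwindSafe};

    fn rust_try_analogy(try_fn: &mut dyn FnMut(), catch_fn: &mut dyn FnMut()) -> i32 {
        match panic::catch_unwind(AssertUnwindSafe(|| try_fn())) {
            // The try closure ran to completion: report "no panic caught".
            Ok(()) => 0,
            // A panic unwound out of the try closure: run the catch path and
            // report that an exception was caught.
            Err(_) => {
                catch_fn();
                1
            }
        }
    }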
@@ -1070,8 +1130,14 @@ fn try_intrinsic<'a, 'b, 'gcc, 'tcx>(bx: &'b mut Builder<'a, 'gcc, 'tcx>, try_fu
 // function calling it, and that function may already have other personality
 // functions in play. By calling a shim we're guaranteed that our shim will have
 // the right personality function.
-#[cfg(feature="master")]
-fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>, data: RValue<'gcc>, catch_func: RValue<'gcc>, dest: RValue<'gcc>) {
+#[cfg(feature = "master")]
+fn codegen_gnu_try<'gcc>(
+    bx: &mut Builder<'_, 'gcc, '_>,
+    try_func: RValue<'gcc>,
+    data: RValue<'gcc>,
+    catch_func: RValue<'gcc>,
+    dest: RValue<'gcc>,
+) {
     let cx: &CodegenCx<'gcc, '_> = bx.cx;
     let (llty, func) = get_rust_try_fn(cx, &mut |mut bx| {
         // Codegens the shims described above:
@@ -1095,7 +1161,7 @@ fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>,
         let catch_func = func.get_param(2).to_rvalue();
         let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
 
-        let current_block = bx.block.clone();
+        let current_block = bx.block;
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -1130,36 +1196,44 @@ fn codegen_gnu_try<'gcc>(bx: &mut Builder<'_, 'gcc, '_>, try_func: RValue<'gcc>,
     bx.store(ret, dest, i32_align);
 }
 
-
 // Helper function used to get a handle to the `__rust_try` function used to
 // catch exceptions.
 //
 // This function is only generated once and is then cached.
-#[cfg(feature="master")]
-fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) {
+#[cfg(feature = "master")]
+fn get_rust_try_fn<'a, 'gcc, 'tcx>(
+    cx: &'a CodegenCx<'gcc, 'tcx>,
+    codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>),
+) -> (Type<'gcc>, Function<'gcc>) {
     if let Some(llfn) = cx.rust_try_fn.get() {
         return llfn;
     }
 
     // Define the type up front for the signature of the rust_try function.
     let tcx = cx.tcx;
-    let i8p = Ty::new_mut_ptr(tcx,tcx.types.i8);
+    let i8p = Ty::new_mut_ptr(tcx, tcx.types.i8);
     // `unsafe fn(*mut i8) -> ()`
-    let try_fn_ty = Ty::new_fn_ptr(tcx,ty::Binder::dummy(tcx.mk_fn_sig(
-        iter::once(i8p),
-        Ty::new_unit(tcx,),
-        false,
-        rustc_hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let try_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            iter::once(i8p),
+            Ty::new_unit(tcx),
+            false,
+            rustc_hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
     // `unsafe fn(*mut i8, *mut i8) -> ()`
-    let catch_fn_ty = Ty::new_fn_ptr(tcx,ty::Binder::dummy(tcx.mk_fn_sig(
-        [i8p, i8p].iter().cloned(),
-        Ty::new_unit(tcx,),
-        false,
-        rustc_hir::Unsafety::Unsafe,
-        Abi::Rust,
-    )));
+    let catch_fn_ty = Ty::new_fn_ptr(
+        tcx,
+        ty::Binder::dummy(tcx.mk_fn_sig(
+            [i8p, i8p].iter().cloned(),
+            Ty::new_unit(tcx),
+            false,
+            rustc_hir::Unsafety::Unsafe,
+            Abi::Rust,
+        )),
+    );
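
Written as surface Rust types, the two signatures assembled above (and the `__rust_try` shape described in the comment just below) look like this sketch; the aliases are hypothetical and do not appear in the file:

    type TryFn = unsafe fn(*mut i8);
    type CatchFn = unsafe fn(*mut i8, *mut i8);
    type RustTry = unsafe fn(TryFn, *mut i8, CatchFn) -> i32;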
     // `unsafe fn(unsafe fn(*mut i8) -> (), *mut i8, unsafe fn(*mut i8, *mut i8) -> ()) -> i32`
     let rust_fn_sig = ty::Binder::dummy(cx.tcx.mk_fn_sig(
         [try_fn_ty, i8p, catch_fn_ty],
@@ -1175,8 +1249,13 @@ fn get_rust_try_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, codegen: &mut
 
 // Helper function to give a Block to a closure to codegen a shim function.
 // This is currently primarily used for the `try` intrinsic functions above.
-#[cfg(feature="master")]
-fn gen_fn<'a, 'gcc, 'tcx>(cx: &'a CodegenCx<'gcc, 'tcx>, name: &str, rust_fn_sig: ty::PolyFnSig<'tcx>, codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>)) -> (Type<'gcc>, Function<'gcc>) {
+#[cfg(feature = "master")]
+fn gen_fn<'a, 'gcc, 'tcx>(
+    cx: &'a CodegenCx<'gcc, 'tcx>,
+    name: &str,
+    rust_fn_sig: ty::PolyFnSig<'tcx>,
+    codegen: &mut dyn FnMut(Builder<'a, 'gcc, 'tcx>),
+) -> (Type<'gcc>, Function<'gcc>) {
     let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty());
     let return_type = fn_abi.gcc_type(cx).return_type;
     // FIXME(eddyb) find a nicer way to do this.