about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author	tempdragon <645703113@qq.com>	2024-02-24 19:56:29 +0800
committer	tempdragon <645703113@qq.com>	2024-02-24 20:12:08 +0800
commit	2ffe9d1eefb175addf8c14cc48dd7fcf91b2b008 (patch)
tree	b52be5bb5f14d60af49e6a6710baaf961264db6e /src
parent	c638defad75543fe2762a2afd34e4b7f97be5a35 (diff)
download	rust-2ffe9d1eefb175addf8c14cc48dd7fcf91b2b008.tar.gz
	rust-2ffe9d1eefb175addf8c14cc48dd7fcf91b2b008.zip
feat(int.rs&builder.rs): Add location info to arithmetic operators
TODO:
1. Clean the unnecessary locations in builder.rs & int.rs
2. Add demangling support
3. Add debug scope support
4. Add vtable support
5. Clean up builder.rs locations
Diffstat (limited to 'src')
-rw-r--r--	src/builder.rs	68
-rw-r--r--	src/int.rs	204
-rw-r--r--	src/intrinsic/mod.rs	108
3 files changed, 199 insertions, 181 deletions
diff --git a/src/builder.rs b/src/builder.rs
index fc4c4d86a7d..c5d3ed8c8a1 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -26,6 +26,7 @@ use rustc_codegen_ssa::traits::{
 use rustc_data_structures::fx::FxHashSet;
 use rustc_middle::bug;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
+use rustc_middle::mir::Rvalue;
 use rustc_middle::ty::{ParamEnv, Ty, TyCtxt};
 use rustc_middle::ty::layout::{FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, HasTyCtxt, LayoutError, LayoutOfHelpers, TyAndLayout};
 use rustc_span::Span;
@@ -398,6 +399,16 @@ impl<'gcc, 'tcx> BackendTypes for Builder<'_, 'gcc, 'tcx> {
     type DIVariable = <CodegenCx<'gcc, 'tcx> as BackendTypes>::DIVariable;
 }
 
+pub fn set_rval_location<'a, 'gcc, 'tcx>(bx: &mut Builder<'a,'gcc,'tcx>, r:RValue<'gcc>) -> RValue<'gcc> {
+    if bx.loc.is_some(){
+        unsafe {
+            r.set_location(bx.loc.unwrap());
+        }
+    }
+    r
+    
+}
+
 impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     fn build(cx: &'a CodegenCx<'gcc, 'tcx>, block: Block<'gcc>) -> Builder<'a, 'gcc, 'tcx> {
         Builder::with_cx(cx, block)
@@ -612,7 +623,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
             // FIXME(antoyo): this seems to produce the wrong result.
             return self.context.new_call(self.loc, fmodf, &[a, b]);
         }
-        else if let Some(vector_type) = a_type_unqualified.dyncast_vector() {
+        if let Some(vector_type) = a_type_unqualified.dyncast_vector() {
             assert_eq!(a_type_unqualified, b.get_type().unqualified());
 
             let num_units = vector_type.get_num_units();
@@ -630,7 +641,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         assert_eq!(a_type_unqualified, self.cx.double_type);
 
         let fmod = self.context.get_builtin_function("fmod");
-        return self.context.new_call(self.loc, fmod, &[a, b]);
+        self.context.new_call(self.loc, fmod, &[a, b])
     }
 
     fn shl(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -652,73 +663,80 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn or(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.gcc_or(a, b)
+        let ret = self.cx.gcc_or(a, b, self.loc);
+        
+        if self.loc.is_some() {
+            unsafe { ret.set_location(self.loc.unwrap()); }
+        }
+        ret
     }
 
     fn xor(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_xor(a, b)
+        set_rval_location(self,self.gcc_xor(a, b))
     }
 
     fn neg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_neg(a)
+        set_rval_location(self,self.gcc_neg(a))
     }
 
     fn fneg(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.context.new_unary_op(self.loc, UnaryOp::Minus, a.get_type(), a)
+        set_rval_location(self,self.cx.context.new_unary_op(self.loc, UnaryOp::Minus, a.get_type(), a))
     }
 
     fn not(&mut self, a: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_not(a)
+        set_rval_location(self,self.gcc_not(a))
     }
 
     fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_add(a, b)
+        set_rval_location(self,self.gcc_add(a, b))
     }
 
     fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_add(a, b)
+        set_rval_location(self,self.gcc_add(a, b))
     }
 
     fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_sub(a, b)
+        set_rval_location(self,self.gcc_sub(a, b))
     }
 
     fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): should generate poison value?
-        self.gcc_sub(a, b)
+        set_rval_location(self,self.gcc_sub(a, b))
     }
 
     fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_mul(a, b)
+        set_rval_location(self,self.gcc_mul(a, b))
     }
 
     fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.gcc_mul(a, b)
+        set_rval_location(self,self.gcc_mul(a, b))
     }
 
     fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs + rhs
+        set_rval_location(self,lhs + rhs)
     }
 
     fn fsub_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs - rhs
+        set_rval_location(self,lhs - rhs)
     }
 
     fn fmul_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs * rhs
+        set_rval_location(self,lhs * rhs)
     }
 
     fn fdiv_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        lhs / rhs
+        set_rval_location(self,lhs / rhs)
     }
 
     fn frem_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
         // NOTE: it seems like we cannot enable fast-mode for a single operation in GCC.
-        self.frem(lhs, rhs)
+        let i = self.frem(lhs, rhs);
+        set_rval_location(self,i);
+        i
     }
 
     fn checked_binop(&mut self, oop: OverflowOp, typ: Ty<'_>, lhs: Self::Value, rhs: Self::Value) -> (Self::Value, Self::Value) {
@@ -1005,33 +1023,33 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }
 
     fn fptoui(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_float_to_uint_cast(value, dest_ty)
+        set_rval_location(self,self.gcc_float_to_uint_cast(value, dest_ty))
     }
 
     fn fptosi(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_float_to_int_cast(value, dest_ty)
+        set_rval_location(self,self.gcc_float_to_int_cast(value, dest_ty))
     }
 
     fn uitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_uint_to_float_cast(value, dest_ty)
+        set_rval_location(self,self.gcc_uint_to_float_cast(value, dest_ty))
     }
 
     fn sitofp(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.gcc_int_to_float_cast(value, dest_ty)
+        set_rval_location(self,self.gcc_int_to_float_cast(value, dest_ty))
     }
 
     fn fptrunc(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
         // TODO(antoyo): make sure it truncates.
-        self.context.new_cast(self.loc, value, dest_ty)
+        set_rval_location(self,self.context.new_cast(self.loc, value, dest_ty))
     }
 
     fn fpext(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(self.loc, value, dest_ty)
+        set_rval_location(self,self.context.new_cast(self.loc, value, dest_ty))
     }
 
     fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
         let usize_value = self.cx.const_bitcast(value, self.cx.type_isize());
-        self.intcast(usize_value, dest_ty, false)
+        self.intcast(usize_value, dest_ty, false)        
     }
 
     fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> {
diff --git a/src/int.rs b/src/int.rs
index b0ceacf2ffc..fe38d89ff8c 100644
--- a/src/int.rs
+++ b/src/int.rs
@@ -4,7 +4,7 @@
 
 use std::convert::TryFrom;
 
-use gccjit::{ComparisonOp, FunctionType, RValue, ToRValue, Type, UnaryOp, BinaryOp};
+use gccjit::{BinaryOp, ComparisonOp, FunctionType, Location, RValue, ToRValue, Type, UnaryOp};
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
 use rustc_codegen_ssa::traits::{BackendTypes, BaseTypeMethods, BuilderMethods, OverflowOp};
 use rustc_middle::ty::{ParamEnv, Ty};
@@ -35,13 +35,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 else {
                     UnaryOp::BitwiseNegate
                 };
-            self.cx.context.new_unary_op(None, operation, typ, a)
+            self.cx.context.new_unary_op(self.loc, operation, typ, a)
         }
         else {
             let element_type = typ.dyncast_array().expect("element type");
             self.from_low_high_rvalues(typ,
-                self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.low(a)),
-                self.cx.context.new_unary_op(None, UnaryOp::BitwiseNegate, element_type, self.high(a)),
+                self.cx.context.new_unary_op(self.loc, UnaryOp::BitwiseNegate, element_type, self.low(a)),
+                self.cx.context.new_unary_op(self.loc, UnaryOp::BitwiseNegate, element_type, self.high(a)),
             )
         }
     }
@@ -49,7 +49,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     pub fn gcc_neg(&self, a: RValue<'gcc>) -> RValue<'gcc> {
         let a_type = a.get_type();
         if self.is_native_int_type(a_type) || a_type.is_vector() {
-            self.cx.context.new_unary_op(None, UnaryOp::Minus, a.get_type(), a)
+            self.cx.context.new_unary_op(self.loc, UnaryOp::Minus, a.get_type(), a)
         }
         else {
             self.gcc_add(self.gcc_not(a), self.gcc_int(a_type, 1))
@@ -57,7 +57,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
     }
 
     pub fn gcc_and(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b)
+        self.cx.bitwise_operation(BinaryOp::BitwiseAnd, a, b, self.loc)
     }
 
     pub fn gcc_lshr(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -69,7 +69,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by a signed number.
             // TODO(antoyo): cast to unsigned to do a logical shift if that does not work.
             if a_type.is_signed(self) != b_type.is_signed(self) {
-                let b = self.context.new_cast(None, b, a_type);
+                let b = self.context.new_cast(self.loc, b, a_type);
                 a >> b
             }
             else {
@@ -95,14 +95,14 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let b0_block = func.new_block("b0");
             let actual_else_block = func.new_block("actual_else");
 
-            let result = func.new_local(None, a_type, "shiftResult");
+            let result = func.new_local(self.loc, a_type, "shiftResult");
 
             let sixty_four = self.gcc_int(native_int_type, 64);
             let sixty_three = self.gcc_int(native_int_type, 63);
             let zero = self.gcc_zero(native_int_type);
             let b = self.gcc_int_cast(b, native_int_type);
             let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
-            self.llbb().end_with_conditional(None, condition, then_block, else_block);
+            self.llbb().end_with_conditional(self.loc, condition, then_block, else_block);
 
             let shift_value = self.gcc_sub(b, sixty_four);
             let high = self.high(a);
@@ -114,27 +114,27 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                     zero
                 };
             let array_value = self.from_low_high_rvalues(a_type, high >> shift_value, sign);
-            then_block.add_assignment(None, result, array_value);
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(self.loc, result, array_value);
+            then_block.end_with_jump(self.loc, after_block);
 
             let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
-            else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+            else_block.end_with_conditional(self.loc, condition, b0_block, actual_else_block);
 
-            b0_block.add_assignment(None, result, a);
-            b0_block.end_with_jump(None, after_block);
+            b0_block.add_assignment(self.loc, result, a);
+            b0_block.end_with_jump(self.loc, after_block);
 
             let shift_value = self.gcc_sub(sixty_four, b);
             // NOTE: cast low to its unsigned type in order to perform a logical right shift.
             let unsigned_type = native_int_type.to_unsigned(&self.cx);
-            let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
-            let shifted_low = casted_low >> self.context.new_cast(None, b, unsigned_type);
-            let shifted_low = self.context.new_cast(None, shifted_low, native_int_type);
+            let casted_low = self.context.new_cast(self.loc, self.low(a), unsigned_type);
+            let shifted_low = casted_low >> self.context.new_cast(self.loc, b, unsigned_type);
+            let shifted_low = self.context.new_cast(self.loc, shifted_low, native_int_type);
             let array_value = self.from_low_high_rvalues(a_type,
                 (high << shift_value) | shifted_low,
                 high >> b,
             );
-            actual_else_block.add_assignment(None, result, array_value);
-            actual_else_block.end_with_jump(None, after_block);
+            actual_else_block.add_assignment(self.loc, result, array_value);
+            actual_else_block.end_with_jump(self.loc, after_block);
 
             // NOTE: since jumps were added in a place rustc does not expect, the current block in the
             // state need to be updated.
@@ -152,13 +152,13 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 if a_type.is_vector() {
                     // Vector types need to be bitcast.
                     // TODO(antoyo): perhaps use __builtin_convertvector for vector casting.
-                    b = self.context.new_bitcast(None, b, a.get_type());
+                    b = self.context.new_bitcast(self.loc, b, a.get_type());
                 }
                 else {
-                    b = self.context.new_cast(None, b, a.get_type());
+                    b = self.context.new_cast(self.loc, b, a.get_type());
                 }
             }
-            self.context.new_binary_op(None, operation, a_type, a, b)
+            self.context.new_binary_op(self.loc, operation, a_type, a, b)
         }
         else {
             debug_assert!(a_type.dyncast_array().is_some());
@@ -172,10 +172,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                     (BinaryOp::Minus, false) => "__rust_u128_sub",
                     _ => unreachable!("unexpected additive operation {:?}", operation),
                 };
-            let param_a = self.context.new_parameter(None, a_type, "a");
-            let param_b = self.context.new_parameter(None, b_type, "b");
-            let func = self.context.new_function(None, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
-            self.context.new_call(None, func, &[a, b])
+            let param_a = self.context.new_parameter(self.loc, a_type, "a");
+            let param_b = self.context.new_parameter(self.loc, b_type, "b");
+            let func = self.context.new_function(self.loc, FunctionType::Extern, a_type, &[param_a, param_b], func_name, false);
+            self.context.new_call(self.loc, func, &[a, b])
         }
     }
 
@@ -335,10 +335,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let intrinsic = self.context.get_builtin_function(&name);
         let res = self.current_func()
             // TODO(antoyo): is it correct to use rhs type instead of the parameter typ?
-            .new_local(None, rhs.get_type(), "binopResult")
-            .get_address(None);
+            .new_local(self.loc, rhs.get_type(), "binopResult")
+            .get_address(self.loc);
         let overflow = self.overflow_call(intrinsic, &[lhs, rhs, res], None);
-        (res.dereference(None).to_rvalue(), overflow)
+        (res.dereference(self.loc).to_rvalue(), overflow)
     }
 
     pub fn operation_with_overflow(&self, func_name: &str, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> (RValue<'gcc>, RValue<'gcc>) {
@@ -346,10 +346,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let b_type = rhs.get_type();
         debug_assert!(a_type.dyncast_array().is_some());
         debug_assert!(b_type.dyncast_array().is_some());
-        let param_a = self.context.new_parameter(None, a_type, "a");
-        let param_b = self.context.new_parameter(None, b_type, "b");
-        let result_field = self.context.new_field(None, a_type, "result");
-        let overflow_field = self.context.new_field(None, self.bool_type, "overflow");
+        let param_a = self.context.new_parameter(self.loc, a_type, "a");
+        let param_b = self.context.new_parameter(self.loc, b_type, "b");
+        let result_field = self.context.new_field(self.loc, a_type, "result");
+        let overflow_field = self.context.new_field(self.loc, self.bool_type, "overflow");
 
         let ret_ty = Ty::new_tup(self.tcx, &[self.tcx.types.i128, self.tcx.types.bool]);
         let layout = self.tcx.layout_of(ParamEnv::reveal_all().and(ret_ty)).unwrap();
@@ -372,23 +372,23 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
         let indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. });
 
-        let return_type = self.context.new_struct_type(None, "result_overflow", &[result_field, overflow_field]);
+        let return_type = self.context.new_struct_type(self.loc, "result_overflow", &[result_field, overflow_field]);
         let result =
             if indirect {
-                let return_value = self.current_func().new_local(None, return_type.as_type(), "return_value");
+                let return_value = self.current_func().new_local(self.loc, return_type.as_type(), "return_value");
                 let return_param_type = return_type.as_type().make_pointer();
-                let return_param = self.context.new_parameter(None, return_param_type, "return_value");
-                let func = self.context.new_function(None, FunctionType::Extern, self.type_void(), &[return_param, param_a, param_b], func_name, false);
-                self.llbb().add_eval(None, self.context.new_call(None, func, &[return_value.get_address(None), lhs, rhs]));
+                let return_param = self.context.new_parameter(self.loc, return_param_type, "return_value");
+                let func = self.context.new_function(self.loc, FunctionType::Extern, self.type_void(), &[return_param, param_a, param_b], func_name, false);
+                self.llbb().add_eval(self.loc, self.context.new_call(self.loc, func, &[return_value.get_address(self.loc), lhs, rhs]));
                 return_value.to_rvalue()
             }
             else {
-                let func = self.context.new_function(None, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
-                self.context.new_call(None, func, &[lhs, rhs])
+                let func = self.context.new_function(self.loc, FunctionType::Extern, return_type.as_type(), &[param_a, param_b], func_name, false);
+                self.context.new_call(self.loc, func, &[lhs, rhs])
             };
-        let overflow = result.access_field(None, overflow_field);
-        let int_result = result.access_field(None, result_field);
-        return (int_result, overflow);
+        let overflow = result.access_field(self.loc, overflow_field);
+        let int_result = result.access_field(self.loc, result_field);
+        (int_result, overflow)
     }
 
     pub fn gcc_icmp(&mut self, op: IntPredicate, mut lhs: RValue<'gcc>, mut rhs: RValue<'gcc>) -> RValue<'gcc> {
@@ -397,7 +397,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if self.is_non_native_int_type(a_type) || self.is_non_native_int_type(b_type) {
             // This algorithm is based on compiler-rt's __cmpti2:
             // https://github.com/llvm-mirror/compiler-rt/blob/f0745e8476f069296a7c71accedd061dce4cdf79/lib/builtins/cmpti2.c#L21
-            let result = self.current_func().new_local(None, self.int_type, "icmp_result");
+            let result = self.current_func().new_local(self.loc, self.int_type, "icmp_result");
             let block1 = self.current_func().new_block("block1");
             let block2 = self.current_func().new_block("block2");
             let block3 = self.current_func().new_block("block3");
@@ -413,35 +413,35 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             // the sign is only on high).
             let unsigned_type = native_int_type.to_unsigned(&self.cx);
 
-            let lhs_low = self.context.new_cast(None, self.low(lhs), unsigned_type);
-            let rhs_low = self.context.new_cast(None, self.low(rhs), unsigned_type);
+            let lhs_low = self.context.new_cast(self.loc, self.low(lhs), unsigned_type);
+            let rhs_low = self.context.new_cast(self.loc, self.low(rhs), unsigned_type);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::LessThan, self.high(lhs), self.high(rhs));
-            self.llbb().end_with_conditional(None, condition, block1, block2);
+            let condition = self.context.new_comparison(self.loc, ComparisonOp::LessThan, self.high(lhs), self.high(rhs));
+            self.llbb().end_with_conditional(self.loc, condition, block1, block2);
 
-            block1.add_assignment(None, result, self.context.new_rvalue_zero(self.int_type));
-            block1.end_with_jump(None, after);
+            block1.add_assignment(self.loc, result, self.context.new_rvalue_zero(self.int_type));
+            block1.end_with_jump(self.loc, after);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::GreaterThan, self.high(lhs), self.high(rhs));
-            block2.end_with_conditional(None, condition, block3, block4);
+            let condition = self.context.new_comparison(self.loc, ComparisonOp::GreaterThan, self.high(lhs), self.high(rhs));
+            block2.end_with_conditional(self.loc, condition, block3, block4);
 
-            block3.add_assignment(None, result, self.context.new_rvalue_from_int(self.int_type, 2));
-            block3.end_with_jump(None, after);
+            block3.add_assignment(self.loc, result, self.context.new_rvalue_from_int(self.int_type, 2));
+            block3.end_with_jump(self.loc, after);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::LessThan, lhs_low, rhs_low);
-            block4.end_with_conditional(None, condition, block5, block6);
+            let condition = self.context.new_comparison(self.loc, ComparisonOp::LessThan, lhs_low, rhs_low);
+            block4.end_with_conditional(self.loc, condition, block5, block6);
 
-            block5.add_assignment(None, result, self.context.new_rvalue_zero(self.int_type));
-            block5.end_with_jump(None, after);
+            block5.add_assignment(self.loc, result, self.context.new_rvalue_zero(self.int_type));
+            block5.end_with_jump(self.loc, after);
 
-            let condition = self.context.new_comparison(None, ComparisonOp::GreaterThan, lhs_low, rhs_low);
-            block6.end_with_conditional(None, condition, block7, block8);
+            let condition = self.context.new_comparison(self.loc, ComparisonOp::GreaterThan, lhs_low, rhs_low);
+            block6.end_with_conditional(self.loc, condition, block7, block8);
 
-            block7.add_assignment(None, result, self.context.new_rvalue_from_int(self.int_type, 2));
-            block7.end_with_jump(None, after);
+            block7.add_assignment(self.loc, result, self.context.new_rvalue_from_int(self.int_type, 2));
+            block7.end_with_jump(self.loc, after);
 
-            block8.add_assignment(None, result, self.context.new_rvalue_one(self.int_type));
-            block8.end_with_jump(None, after);
+            block8.add_assignment(self.loc, result, self.context.new_rvalue_one(self.int_type));
+            block8.end_with_jump(self.loc, after);
 
             // NOTE: since jumps were added in a place rustc does not expect, the current block in the
             // state need to be updated.
@@ -451,10 +451,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let (op, limit) =
                 match op {
                     IntPredicate::IntEQ => {
-                        return self.context.new_comparison(None, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
+                        return self.context.new_comparison(self.loc, ComparisonOp::Equals, cmp, self.context.new_rvalue_one(self.int_type));
                     },
                     IntPredicate::IntNE => {
-                        return self.context.new_comparison(None, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
+                        return self.context.new_comparison(self.loc, ComparisonOp::NotEquals, cmp, self.context.new_rvalue_one(self.int_type));
                     },
                     // TODO(antoyo): cast to u128 for unsigned comparison. See below.
                     IntPredicate::IntUGT => (ComparisonOp::Equals, 2),
@@ -466,39 +466,39 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                     IntPredicate::IntSLT => (ComparisonOp::Equals, 0),
                     IntPredicate::IntSLE => (ComparisonOp::LessThanEquals, 1),
                 };
-            self.context.new_comparison(None, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
+            self.context.new_comparison(self.loc, op, cmp, self.context.new_rvalue_from_int(self.int_type, limit))
         }
         else if a_type.get_pointee().is_some() && b_type.get_pointee().is_some() {
             // NOTE: gcc cannot compare pointers to different objects, but rustc does that, so cast them to usize.
-            lhs = self.context.new_bitcast(None, lhs, self.usize_type);
-            rhs = self.context.new_bitcast(None, rhs, self.usize_type);
-            self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+            lhs = self.context.new_bitcast(self.loc, lhs, self.usize_type);
+            rhs = self.context.new_bitcast(self.loc, rhs, self.usize_type);
+            self.context.new_comparison(self.loc, op.to_gcc_comparison(), lhs, rhs)
         }
         else {
             if a_type != b_type {
                 // NOTE: because libgccjit cannot compare function pointers.
                 if a_type.dyncast_function_ptr_type().is_some() && b_type.dyncast_function_ptr_type().is_some() {
-                    lhs = self.context.new_cast(None, lhs, self.usize_type.make_pointer());
-                    rhs = self.context.new_cast(None, rhs, self.usize_type.make_pointer());
+                    lhs = self.context.new_cast(self.loc, lhs, self.usize_type.make_pointer());
+                    rhs = self.context.new_cast(self.loc, rhs, self.usize_type.make_pointer());
                 }
                 // NOTE: hack because we try to cast a vector type to the same vector type.
                 else if format!("{:?}", a_type) != format!("{:?}", b_type) {
-                    rhs = self.context.new_cast(None, rhs, a_type);
+                    rhs = self.context.new_cast(self.loc, rhs, a_type);
                 }
             }
             match op {
                 IntPredicate::IntUGT | IntPredicate::IntUGE | IntPredicate::IntULT | IntPredicate::IntULE => {
                     if !a_type.is_vector() {
                         let unsigned_type = a_type.to_unsigned(&self.cx);
-                        lhs = self.context.new_cast(None, lhs, unsigned_type);
-                        rhs = self.context.new_cast(None, rhs, unsigned_type);
+                        lhs = self.context.new_cast(self.loc, lhs, unsigned_type);
+                        rhs = self.context.new_cast(self.loc, rhs, unsigned_type);
                     }
                 },
                 // TODO(antoyo): we probably need to handle signed comparison for unsigned
                 // integers.
                 _ => (),
             }
-            self.context.new_comparison(None, op.to_gcc_comparison(), lhs, rhs)
+            self.context.new_comparison(self.loc, op.to_gcc_comparison(), lhs, rhs)
         }
     }
 
@@ -528,12 +528,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if a_native && b_native {
             // FIXME(antoyo): remove the casts when libgccjit can shift an unsigned number by an unsigned number.
             if a_type.is_unsigned(self) && b_type.is_signed(self) {
-                let a = self.context.new_cast(None, a, b_type);
+                let a = self.context.new_cast(self.loc, a, b_type);
                 let result = a << b;
-                self.context.new_cast(None, result, a_type)
+                self.context.new_cast(self.loc, result, a_type)
             }
             else if a_type.is_signed(self) && b_type.is_unsigned(self) {
-                let b = self.context.new_cast(None, b, a_type);
+                let b = self.context.new_cast(self.loc, b, a_type);
                 a << b
             }
             else {
@@ -557,40 +557,40 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let b0_block = func.new_block("b0");
             let actual_else_block = func.new_block("actual_else");
 
-            let result = func.new_local(None, a_type, "shiftResult");
+            let result = func.new_local(self.loc, a_type, "shiftResult");
 
             let b = self.gcc_int_cast(b, native_int_type);
             let sixty_four = self.gcc_int(native_int_type, 64);
             let zero = self.gcc_zero(native_int_type);
             let condition = self.gcc_icmp(IntPredicate::IntNE, self.gcc_and(b, sixty_four), zero);
-            self.llbb().end_with_conditional(None, condition, then_block, else_block);
+            self.llbb().end_with_conditional(self.loc, condition, then_block, else_block);
 
             let array_value = self.from_low_high_rvalues(a_type,
                 zero,
                 self.low(a) << (b - sixty_four),
             );
-            then_block.add_assignment(None, result, array_value);
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(self.loc, result, array_value);
+            then_block.end_with_jump(self.loc, after_block);
 
             let condition = self.gcc_icmp(IntPredicate::IntEQ, b, zero);
-            else_block.end_with_conditional(None, condition, b0_block, actual_else_block);
+            else_block.end_with_conditional(self.loc, condition, b0_block, actual_else_block);
 
-            b0_block.add_assignment(None, result, a);
-            b0_block.end_with_jump(None, after_block);
+            b0_block.add_assignment(self.loc, result, a);
+            b0_block.end_with_jump(self.loc, after_block);
 
             // NOTE: cast low to its unsigned type in order to perform a logical right shift.
             // TODO(antoyo): adjust this ^ comment.
             let unsigned_type = native_int_type.to_unsigned(&self.cx);
-            let casted_low = self.context.new_cast(None, self.low(a), unsigned_type);
-            let shift_value = self.context.new_cast(None, sixty_four - b, unsigned_type);
-            let high_low = self.context.new_cast(None, casted_low >> shift_value, native_int_type);
+            let casted_low = self.context.new_cast(self.loc, self.low(a), unsigned_type);
+            let shift_value = self.context.new_cast(self.loc, sixty_four - b, unsigned_type);
+            let high_low = self.context.new_cast(self.loc, casted_low >> shift_value, native_int_type);
 
             let array_value = self.from_low_high_rvalues(a_type,
                 self.low(a) << b,
                 (self.high(a) << b) | high_low,
             );
-            actual_else_block.add_assignment(None, result, array_value);
-            actual_else_block.end_with_jump(None, after_block);
+            actual_else_block.add_assignment(self.loc, result, array_value);
+            actual_else_block.end_with_jump(self.loc, after_block);
 
             // NOTE: since jumps were added in a place rustc does not expect, the current block in the
             // state need to be updated.
@@ -606,10 +606,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let native_int_type = arg_type.dyncast_array().expect("get element type");
             let lsb = self.low(arg);
             let swapped_lsb = self.gcc_bswap(lsb, width / 2);
-            let swapped_lsb = self.context.new_cast(None, swapped_lsb, native_int_type);
+            let swapped_lsb = self.context.new_cast(self.loc, swapped_lsb, native_int_type);
             let msb = self.high(arg);
             let swapped_msb = self.gcc_bswap(msb, width / 2);
-            let swapped_msb = self.context.new_cast(None, swapped_msb, native_int_type);
+            let swapped_msb = self.context.new_cast(self.loc, swapped_msb, native_int_type);
 
             // NOTE: we also need to swap the two elements here, in addition to swapping inside
             // the elements themselves like done above.
@@ -625,7 +625,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if param_type != arg_type {
             arg = self.bitcast(arg, param_type);
         }
-        self.cx.context.new_call(None, bswap, &[arg])
+        self.cx.context.new_call(self.loc, bswap, &[arg])
     }
 }
 
@@ -700,33 +700,33 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         }
     }
 
-    fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>) -> RValue<'gcc> {
+    fn bitwise_operation(&self, operation: BinaryOp, a: RValue<'gcc>, mut b: RValue<'gcc>, loc: Option<Location<'gcc>>) -> RValue<'gcc> {
         let a_type = a.get_type();
         let b_type = b.get_type();
         let a_native = self.is_native_int_type_or_bool(a_type);
         let b_native = self.is_native_int_type_or_bool(b_type);
         if a_type.is_vector() && b_type.is_vector() {
             let b = self.bitcast_if_needed(b, a_type);
-            self.context.new_binary_op(None, operation, a_type, a, b)
+            self.context.new_binary_op(loc, operation, a_type, a, b)
         }
         else if a_native && b_native {
             if a_type != b_type {
-                b = self.context.new_cast(None, b, a_type);
+                b = self.context.new_cast(loc, b, a_type);
             }
-            self.context.new_binary_op(None, operation, a_type, a, b)
+            self.context.new_binary_op(loc, operation, a_type, a, b)
         }
         else {
             assert!(!a_native && !b_native, "both types should either be native or non-native for or operation");
             let native_int_type = a_type.dyncast_array().expect("get element type");
             self.from_low_high_rvalues(a_type,
-                self.context.new_binary_op(None, operation, native_int_type, self.low(a), self.low(b)),
-                self.context.new_binary_op(None, operation, native_int_type, self.high(a), self.high(b)),
+                self.context.new_binary_op(loc, operation, native_int_type, self.low(a), self.low(b)),
+                self.context.new_binary_op(loc, operation, native_int_type, self.high(a), self.high(b)),
             )
         }
     }
 
-    pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        self.bitwise_operation(BinaryOp::BitwiseOr, a, b)
+    pub fn gcc_or(&self, a: RValue<'gcc>, b: RValue<'gcc>, loc: Option<Location<'gcc>>) -> RValue<'gcc> {
+        self.bitwise_operation(BinaryOp::BitwiseOr, a, b, loc)
     }
 
     // TODO(antoyo): can we use https://github.com/rust-lang/compiler-builtins/blob/master/src/int/mod.rs#L379 instead?
diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs
index f162ef831b7..0849c6266f1 100644
--- a/src/intrinsic/mod.rs
+++ b/src/intrinsic/mod.rs
@@ -640,7 +640,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                     let new_low = self.gcc_int_cast(reversed_high, typ);
                     let new_high = self.shl(self.gcc_int_cast(reversed_low, typ), sixty_four);
 
-                    self.gcc_or(new_low, new_high)
+                    self.gcc_or(new_low, new_high, self.loc)
                 },
                 _ => {
                     panic!("cannot bit reverse with width = {}", width);
@@ -685,44 +685,44 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                 let first_elem = self.context.new_array_access(None, result, zero);
                 let first_value = self.gcc_int_cast(self.context.new_call(None, clzll, &[high]), arg_type);
                 self.llbb()
-                    .add_assignment(None, first_elem, first_value);
+                    .add_assignment(self.loc, first_elem, first_value);
 
-                let second_elem = self.context.new_array_access(None, result, one);
-                let cast = self.gcc_int_cast(self.context.new_call(None, clzll, &[low]), arg_type);
+                let second_elem = self.context.new_array_access(self.loc, result, one);
+                let cast = self.gcc_int_cast(self.context.new_call(self.loc, clzll, &[low]), arg_type);
                 let second_value = self.add(cast, sixty_four);
                 self.llbb()
-                    .add_assignment(None, second_elem, second_value);
+                    .add_assignment(self.loc, second_elem, second_value);
 
-                let third_elem = self.context.new_array_access(None, result, two);
+                let third_elem = self.context.new_array_access(self.loc, result, two);
                 let third_value = self.const_uint(arg_type, 128);
                 self.llbb()
-                    .add_assignment(None, third_elem, third_value);
+                    .add_assignment(self.loc, third_elem, third_value);
 
-                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
-                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(self.loc, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(self.loc, UnaryOp::LogicalNegate, self.u64_type, low);
                 let not_low_and_not_high = not_low & not_high;
                 let index = not_high + not_low_and_not_high;
                 // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
                 // gcc.
                 // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
                 // compilation stage.
-                let index = self.context.new_cast(None, index, self.i32_type);
+                let index = self.context.new_cast(self.loc, index, self.i32_type);
 
-                let res = self.context.new_array_access(None, result, index);
+                let res = self.context.new_array_access(self.loc, result, index);
 
                 return self.gcc_int_cast(res.to_rvalue(), arg_type);
             }
             else {
                 let count_leading_zeroes = self.context.get_builtin_function("__builtin_clzll");
-                let arg = self.context.new_cast(None, arg, self.ulonglong_type);
+                let arg = self.context.new_cast(self.loc, arg, self.ulonglong_type);
                 let diff = self.ulonglong_type.get_size() as i64 - arg_type.get_size() as i64;
                 let diff = self.context.new_rvalue_from_long(self.int_type, diff * 8);
-                let res = self.context.new_call(None, count_leading_zeroes, &[arg]) - diff;
-                return self.context.new_cast(None, res, arg_type);
+                let res = self.context.new_call(self.loc, count_leading_zeroes, &[arg]) - diff;
+                return self.context.new_cast(self.loc, res, arg_type);
             };
         let count_leading_zeroes = self.context.get_builtin_function(count_leading_zeroes);
-        let res = self.context.new_call(None, count_leading_zeroes, &[arg]);
-        self.context.new_cast(None, res, arg_type)
+        let res = self.context.new_call(self.loc, count_leading_zeroes, &[arg]);
+        self.context.new_cast(self.loc, res, arg_type)
     }
 
     fn count_trailing_zeroes(&mut self, _width: u64, arg: RValue<'gcc>) -> RValue<'gcc> {
@@ -766,58 +766,58 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
 
                 let ctzll = self.context.get_builtin_function("__builtin_ctzll");
 
-                let first_elem = self.context.new_array_access(None, result, zero);
-                let first_value = self.gcc_int_cast(self.context.new_call(None, ctzll, &[low]), arg_type);
+                let first_elem = self.context.new_array_access(self.loc, result, zero);
+                let first_value = self.gcc_int_cast(self.context.new_call(self.loc, ctzll, &[low]), arg_type);
                 self.llbb()
-                    .add_assignment(None, first_elem, first_value);
+                    .add_assignment(self.loc, first_elem, first_value);
 
-                let second_elem = self.context.new_array_access(None, result, one);
-                let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(None, ctzll, &[high]), arg_type), sixty_four);
+                let second_elem = self.context.new_array_access(self.loc, result, one);
+                let second_value = self.gcc_add(self.gcc_int_cast(self.context.new_call(self.loc, ctzll, &[high]), arg_type), sixty_four);
                 self.llbb()
-                    .add_assignment(None, second_elem, second_value);
+                    .add_assignment(self.loc, second_elem, second_value);
 
-                let third_elem = self.context.new_array_access(None, result, two);
+                let third_elem = self.context.new_array_access(self.loc, result, two);
                 let third_value = self.gcc_int(arg_type, 128);
                 self.llbb()
-                    .add_assignment(None, third_elem, third_value);
+                    .add_assignment(self.loc, third_elem, third_value);
 
-                let not_low = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, low);
-                let not_high = self.context.new_unary_op(None, UnaryOp::LogicalNegate, self.u64_type, high);
+                let not_low = self.context.new_unary_op(self.loc, UnaryOp::LogicalNegate, self.u64_type, low);
+                let not_high = self.context.new_unary_op(self.loc, UnaryOp::LogicalNegate, self.u64_type, high);
                 let not_low_and_not_high = not_low & not_high;
                 let index = not_low + not_low_and_not_high;
                 // NOTE: the following cast is necessary to avoid a GIMPLE verification failure in
                 // gcc.
                 // TODO(antoyo): do the correct verification in libgccjit to avoid an error at the
                 // compilation stage.
-                let index = self.context.new_cast(None, index, self.i32_type);
+                let index = self.context.new_cast(self.loc, index, self.i32_type);
 
-                let res = self.context.new_array_access(None, result, index);
+                let res = self.context.new_array_access(self.loc, result, index);
 
                 return self.gcc_int_cast(res.to_rvalue(), result_type);
             }
             else {
                 let count_trailing_zeroes = self.context.get_builtin_function("__builtin_ctzll");
                 let arg_size = arg_type.get_size();
-                let casted_arg = self.context.new_cast(None, arg, self.ulonglong_type);
+                let casted_arg = self.context.new_cast(self.loc, arg, self.ulonglong_type);
                 let byte_diff = self.ulonglong_type.get_size() as i64 - arg_size as i64;
                 let diff = self.context.new_rvalue_from_long(self.int_type, byte_diff * 8);
                 let mask = self.context.new_rvalue_from_long(arg_type, -1); // To get the value with all bits set.
-                let masked = mask & self.context.new_unary_op(None, UnaryOp::BitwiseNegate, arg_type, arg);
-                let cond = self.context.new_comparison(None, ComparisonOp::Equals, masked, mask);
-                let diff = diff * self.context.new_cast(None, cond, self.int_type);
-                let res = self.context.new_call(None, count_trailing_zeroes, &[casted_arg]) - diff;
-                return self.context.new_cast(None, res, result_type);
+                let masked = mask & self.context.new_unary_op(self.loc, UnaryOp::BitwiseNegate, arg_type, arg);
+                let cond = self.context.new_comparison(self.loc, ComparisonOp::Equals, masked, mask);
+                let diff = diff * self.context.new_cast(self.loc, cond, self.int_type);
+                let res = self.context.new_call(self.loc, count_trailing_zeroes, &[casted_arg]) - diff;
+                return self.context.new_cast(self.loc, res, result_type);
             };
         let count_trailing_zeroes = self.context.get_builtin_function(count_trailing_zeroes);
         let arg =
             if arg_type != expected_type {
-                self.context.new_cast(None, arg, expected_type)
+                self.context.new_cast(self.loc, arg, expected_type)
             }
             else {
                 arg
             };
-        let res = self.context.new_call(None, count_trailing_zeroes, &[arg]);
-        self.context.new_cast(None, res, result_type)
+        let res = self.context.new_call(self.loc, count_trailing_zeroes, &[arg]);
+        self.context.new_cast(self.loc, res, result_type)
     }
 
     fn pop_count(&mut self, value: RValue<'gcc>) -> RValue<'gcc> {
@@ -859,8 +859,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let counter = self.current_func().new_local(None, counter_type, "popcount_counter");
         let val = self.current_func().new_local(None, value_type, "popcount_value");
         let zero = self.gcc_zero(counter_type);
-        self.llbb().add_assignment(None, counter, zero);
-        self.llbb().add_assignment(None, val, value);
+        self.llbb().add_assignment(self.loc, counter, zero);
+        self.llbb().add_assignment(self.loc, val, value);
         self.br(loop_head);
 
         // check if value isn't zero
@@ -874,12 +874,12 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         let one = self.gcc_int(value_type, 1);
         let sub = self.gcc_sub(val.to_rvalue(), one);
         let op = self.gcc_and(val.to_rvalue(), sub);
-        loop_body.add_assignment(None, val, op);
+        loop_body.add_assignment(self.loc, val, op);
 
         // counter += 1
         let one = self.gcc_int(counter_type, 1);
         let op = self.gcc_add(counter.to_rvalue(), one);
-        loop_body.add_assignment(None, counter, op);
+        loop_body.add_assignment(self.loc, counter, op);
         self.br(loop_head);
 
         // end of loop
@@ -922,7 +922,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if signed {
             // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
             let func = self.current_func.borrow().expect("func");
-            let res = func.new_local(None, result_type, "saturating_sum");
+            let res = func.new_local(self.loc, result_type, "saturating_sum");
             let supports_native_type = self.is_native_int_type(result_type);
             let overflow =
                 if supports_native_type {
@@ -936,7 +936,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                             _ => unreachable!(),
                         };
                     let overflow_func = self.context.get_builtin_function(func_name);
-                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.loc)], None)
                 }
                 else {
                     let func_name =
@@ -945,7 +945,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                             _ => unreachable!(),
                         };
                     let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
-                    self.llbb().add_assignment(None, res, int_result);
+                    self.llbb().add_assignment(self.loc, res, int_result);
                     overflow
                 };
 
@@ -958,10 +958,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
             let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
             let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
-            then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(self.loc, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+            then_block.end_with_jump(self.loc, after_block);
 
-            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+            self.llbb().end_with_conditional(self.loc, overflow, then_block, after_block);
 
             // NOTE: since jumps were added in a place rustc does not
             // expect, the current block in the state need to be updated.
@@ -974,7 +974,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let res = self.gcc_add(lhs, rhs);
             let cond = self.gcc_icmp(IntPredicate::IntULT, res, lhs);
             let value = self.gcc_neg(self.gcc_int_cast(cond, result_type));
-            self.gcc_or(res, value)
+            self.gcc_or(res, value, self.loc)
         }
     }
 
@@ -984,7 +984,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
         if signed {
             // Based on algorithm from: https://stackoverflow.com/a/56531252/389119
             let func = self.current_func.borrow().expect("func");
-            let res = func.new_local(None, result_type, "saturating_diff");
+            let res = func.new_local(self.loc, result_type, "saturating_diff");
             let supports_native_type = self.is_native_int_type(result_type);
             let overflow =
                 if supports_native_type {
@@ -998,7 +998,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                             _ => unreachable!(),
                         };
                     let overflow_func = self.context.get_builtin_function(func_name);
-                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(None)], None)
+                    self.overflow_call(overflow_func, &[lhs, rhs, res.get_address(self.loc)], None)
                 }
                 else {
                     let func_name =
@@ -1007,7 +1007,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
                             _ => unreachable!(),
                         };
                     let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs);
-                    self.llbb().add_assignment(None, res, int_result);
+                    self.llbb().add_assignment(self.loc, res, int_result);
                     overflow
                 };
 
@@ -1020,10 +1020,10 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> {
             let shifted = self.gcc_lshr(self.gcc_int_cast(lhs, unsigned_type), self.gcc_int(unsigned_type, width as i64 - 1));
             let uint_max = self.gcc_not(self.gcc_int(unsigned_type, 0));
             let int_max = self.gcc_lshr(uint_max, self.gcc_int(unsigned_type, 1));
-            then_block.add_assignment(None, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
-            then_block.end_with_jump(None, after_block);
+            then_block.add_assignment(self.loc, res, self.gcc_int_cast(self.gcc_add(shifted, int_max), result_type));
+            then_block.end_with_jump(self.loc, after_block);
 
-            self.llbb().end_with_conditional(None, overflow, then_block, after_block);
+            self.llbb().end_with_conditional(self.loc, overflow, then_block, after_block);
 
             // NOTE: since jumps were added in a place rustc does not
             // expect, the current block in the state need to be updated.