Diffstat (limited to 'src/librustc_codegen_llvm')
 src/librustc_codegen_llvm/abi.rs                   |  14
 src/librustc_codegen_llvm/asm.rs                   |  12
 src/librustc_codegen_llvm/builder.rs               |  58
 src/librustc_codegen_llvm/debuginfo/gdb.rs         |   6
 src/librustc_codegen_llvm/debuginfo/source_loc.rs  |   4
 src/librustc_codegen_llvm/intrinsic.rs             | 238
 6 files changed, 166 insertions(+), 166 deletions(-)
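The bulk of this patch replaces explicit `bx.cx().foo()` calls with `bx.foo()`, relying on the builder reaching its codegen context transparently. A minimal sketch of that pattern, assuming a `Deref` impl from the builder to the context (the type and method names below are stand-ins for illustration, not the compiler's actual definitions):

```rust
use std::ops::Deref;

// Stand-in for the codegen context that owns session data, constants, etc.
struct CodegenCx {
    pointer_width: u32,
}

impl CodegenCx {
    fn const_bool(&self, b: bool) -> u64 {
        b as u64
    }
}

// Stand-in for the IR builder, which holds a reference to the context.
struct Builder<'a> {
    cx: &'a CodegenCx,
}

// Deref lets `bx.const_bool(..)` resolve through the context, so call
// sites no longer need the explicit `bx.cx()` hop shown on the `-` lines.
impl<'a> Deref for Builder<'a> {
    type Target = CodegenCx;
    fn deref(&self) -> &CodegenCx {
        self.cx
    }
}

fn main() {
    let cx = CodegenCx { pointer_width: 64 };
    let bx = Builder { cx: &cx };
    // before: bx.cx().const_bool(true); after:
    let _t = bx.const_bool(true);
    // field access auto-derefs the same way
    println!("pointer width: {}", bx.pointer_width);
}
```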
diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 3470d6fd0e7..5b6d157043d 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -212,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx())); + let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx)); let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); bx.store(val, cast_dst, self.layout.align.abi); } else { @@ -231,9 +231,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // bitcasting to the struct type yields invalid cast errors. // We instead thus allocate some scratch space... - let scratch_size = cast.size(bx.cx()); - let scratch_align = cast.align(bx.cx()); - let llscratch = bx.alloca(cast.llvm_type(bx.cx()), "abi_cast", scratch_align); + let scratch_size = cast.size(bx); + let scratch_align = cast.align(bx); + let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align); bx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... @@ -245,7 +245,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { self.layout.align.abi, llscratch, scratch_align, - bx.cx().const_usize(self.layout.size.bytes()), + bx.const_usize(self.layout.size.bytes()), MemFlags::empty() ); @@ -299,7 +299,7 @@ impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> { ty.store(self, val, dst) } fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type { - ty.memory_ty(self.cx()) + ty.memory_ty(self) } } @@ -780,7 +780,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { // by the LLVM verifier. if let layout::Int(..) = scalar.value { if !scalar.is_bool() { - let range = scalar.valid_range_exclusive(bx.cx()); + let range = scalar.valid_range_exclusive(bx); if range.start != range.end { bx.range_metadata(callsite, range); } diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index efbe7cad138..294596cea5f 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -57,7 +57,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { // Default per-arch clobbers // Basically what clang does - let arch_clobbers = match &self.cx().sess().target.target.arch[..] { + let arch_clobbers = match &self.sess().target.target.arch[..] { "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], "mips" | "mips64" => vec!["~{$1}"], _ => Vec::new() @@ -76,9 +76,9 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => self.cx().type_void(), + 0 => self.type_void(), 1 => output_types[0], - _ => self.cx().type_struct(&output_types, false) + _ => self.type_struct(&output_types, false) }; let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); @@ -108,13 +108,13 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { // back to source locations. See #17552. 
unsafe { let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx, + let kind = llvm::LLVMGetMDKindIDInContext(self.llcx, key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = self.const_i32(ia.ctxt.outer().as_u32() as i32); llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1)); + llvm::LLVMMDNodeInContext(self.llcx, &val, 1)); } true diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 907a5db143a..91c650f1b53 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -143,11 +143,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn count_insn(&self, category: &str) { - if self.cx().sess().codegen_stats() { - self.cx().stats.borrow_mut().n_llvm_insns += 1; + if self.sess().codegen_stats() { + self.stats.borrow_mut().n_llvm_insns += 1; } - if self.cx().sess().count_llvm_insns() { - *self.cx().stats + if self.sess().count_llvm_insns() { + *self.stats .borrow_mut() .llvm_insns .entry(category.to_string()) @@ -475,8 +475,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { use rustc::ty::{Int, Uint}; let new_sty = match ty.sty { - Int(Isize) => Int(self.cx().tcx.sess.target.isize_ty), - Uint(Usize) => Uint(self.cx().tcx.sess.target.usize_ty), + Int(Isize) => Int(self.tcx.sess.target.isize_ty), + Uint(Usize) => Uint(self.tcx.sess.target.usize_ty), ref t @ Uint(_) | ref t @ Int(_) => t.clone(), _ => panic!("tried to get overflow intrinsic for op applied to non-int type") }; @@ -529,7 +529,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }, }; - let intrinsic = self.cx().get_intrinsic(&name); + let intrinsic = self.get_intrinsic(&name); let res = self.call(intrinsic, &[lhs, rhs], None); ( self.extract_value(res, 0), @@ -637,7 +637,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let vr = scalar.valid_range.clone(); match scalar.value { layout::Int(..) => { - let range = scalar.valid_range_exclusive(bx.cx()); + let range = scalar.valid_range_exclusive(bx); if range.start != range.end { bx.range_metadata(load, range); } @@ -676,7 +676,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let load = self.load(llptr, align); scalar_load_metadata(self, load, scalar); if scalar.is_bool() { - self.trunc(load, self.cx().type_i1()) + self.trunc(load, self.type_i1()) } else { load } @@ -696,7 +696,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) { - if self.cx().sess().target.target.arch == "amdgpu" { + if self.sess().target.target.arch == "amdgpu" { // amdgpu/LLVM does something weird and thinks a i64 value is // split into a v2i32, halving the bitwidth LLVM expects, // tripping an assertion. So, for now, just disable this @@ -942,7 +942,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }).collect::<Vec<_>>(); debug!("Asm Output Type: {:?}", output); - let fty = self.cx().type_func(&argtys[..], output); + let fty = self.type_func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); @@ -970,14 +970,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
let val = self.load(src, src_align); - let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val))); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); self.store_with_flags(val, ptr, dst_align, flags); return; } - let size = self.intcast(size, self.cx().type_isize(), false); + let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.cx().type_i8p()); - let src = self.pointercast(src, self.cx().type_i8p()); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint, src, src_align.bytes() as c_uint, size, is_volatile); @@ -990,14 +990,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { if flags.contains(MemFlags::NONTEMPORAL) { // HACK(nox): This is inefficient but there is no nontemporal memmove. let val = self.load(src, src_align); - let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val))); + let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val))); self.store_with_flags(val, ptr, dst_align, flags); return; } - let size = self.intcast(size, self.cx().type_isize(), false); + let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.cx().type_i8p()); - let src = self.pointercast(src, self.cx().type_i8p()); + let dst = self.pointercast(dst, self.type_i8p()); + let src = self.pointercast(src, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint, src, src_align.bytes() as c_uint, size, is_volatile); @@ -1012,12 +1012,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { align: Align, flags: MemFlags, ) { - let ptr_width = &self.cx().sess().target.target.target_pointer_width; + let ptr_width = &self.sess().target.target.target_pointer_width; let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key); - let ptr = self.pointercast(ptr, self.cx().type_i8p()); - let align = self.cx().const_u32(align.bytes() as u32); - let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE)); + let llintrinsicfn = self.get_intrinsic(&intrinsic_key); + let ptr = self.pointercast(ptr, self.type_i8p()); + let align = self.const_u32(align.bytes() as u32); + let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE)); self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None); } @@ -1083,10 +1083,10 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { let elt_ty = self.cx.val_ty(elt); - let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64)); + let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64)); let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); - let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64); - self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty)) + let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64); + self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty)) } } @@ -1397,7 +1397,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let param_tys = self.cx.func_params_types(fn_ty); let all_args_match = param_tys.iter() - 
.zip(args.iter().map(|&v| self.cx().val_ty(v))) + .zip(args.iter().map(|&v| self.val_ty(v))) .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1408,7 +1408,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { .zip(args.iter()) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = self.cx().val_ty(actual_val); + let actual_ty = self.val_ty(actual_val); if expected_ty != actual_ty { debug!("Type mismatch in function call of {:?}. \ Expected {:?} for param {}, got {:?}; injecting bitcast", diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index 0046a072366..4be93d826b8 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -24,11 +24,11 @@ use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) { - if needs_gdb_debug_scripts_section(bx.cx()) { - let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); + if needs_gdb_debug_scripts_section(bx) { + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. - let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)]; + let indices = [bx.const_i32(0), bx.const_i32(0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index c6772e8c98e..95196287ab6 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -41,7 +41,7 @@ pub fn set_source_location<D>( }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span)); + debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) } else { @@ -76,7 +76,7 @@ pub fn set_debug_location( // For MSVC, set the column number to zero. // Otherwise, emit it. This mimics clang behaviour. 
// See discussion in https://github.com/rust-lang/rust/issues/42921 - let col_used = if bx.cx().sess().target.target.options.is_like_msvc { + let col_used = if bx.sess().target.target.options.is_like_msvc { UNKNOWN_COLUMN_NUMBER } else { col as c_uint diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 54a2684b63f..92c6d56a3d5 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -96,7 +96,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { llresult: &'ll Value, span: Span, ) { - let tcx = self.cx().tcx; + let tcx = self.tcx; let (def_id, substs) = match callee_ty.sty { ty::FnDef(def_id, substs) => (def_id, substs), @@ -109,10 +109,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let ret_ty = sig.output(); let name = &*tcx.item_name(def_id).as_str(); - let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx()); + let llret_ty = self.layout_of(ret_ty).llvm_type(self); let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi); - let simple = get_simple_intrinsic(self.cx(), name); + let simple = get_simple_intrinsic(self, name); let llval = match name { _ if simple.is_some() => { self.call(simple.unwrap(), @@ -123,12 +123,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { return; }, "likely" => { - let expect = self.cx().get_intrinsic(&("llvm.expect.i1")); - self.call(expect, &[args[0].immediate(), self.cx().const_bool(true)], None) + let expect = self.get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), self.const_bool(true)], None) } "unlikely" => { - let expect = self.cx().get_intrinsic(&("llvm.expect.i1")); - self.call(expect, &[args[0].immediate(), self.cx().const_bool(false)], None) + let expect = self.get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), self.const_bool(false)], None) } "try" => { try_intrinsic(self, @@ -139,12 +139,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { return; } "breakpoint" => { - let llfn = self.cx().get_intrinsic(&("llvm.debugtrap")); + let llfn = self.get_intrinsic(&("llvm.debugtrap")); self.call(llfn, &[], None) } "size_of" => { let tp_ty = substs.type_at(0); - self.cx().const_usize(self.cx().size_of(tp_ty).bytes()) + self.const_usize(self.size_of(tp_ty).bytes()) } "size_of_val" => { let tp_ty = substs.type_at(0); @@ -153,12 +153,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { glue::size_and_align_of_dst(self, tp_ty, Some(meta)); llsize } else { - self.cx().const_usize(self.cx().size_of(tp_ty).bytes()) + self.const_usize(self.size_of(tp_ty).bytes()) } } "min_align_of" => { let tp_ty = substs.type_at(0); - self.cx().const_usize(self.cx().align_of(tp_ty).bytes()) + self.const_usize(self.align_of(tp_ty).bytes()) } "min_align_of_val" => { let tp_ty = substs.type_at(0); @@ -167,24 +167,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { glue::size_and_align_of_dst(self, tp_ty, Some(meta)); llalign } else { - self.cx().const_usize(self.cx().align_of(tp_ty).bytes()) + self.const_usize(self.align_of(tp_ty).bytes()) } } "pref_align_of" => { let tp_ty = substs.type_at(0); - self.cx().const_usize(self.cx().layout_of(tp_ty).align.pref.bytes()) + self.const_usize(self.layout_of(tp_ty).align.pref.bytes()) } "type_name" => { let tp_ty = substs.type_at(0); let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - self.cx().const_str_slice(ty_name) + self.const_str_slice(ty_name) } "type_id" 
=> { - self.cx().const_u64(self.cx().tcx.type_id_hash(substs.type_at(0))) + self.const_u64(self.tcx.type_id_hash(substs.type_at(0))) } "init" => { let ty = substs.type_at(0); - if !self.cx().layout_of(ty).is_zst() { + if !self.layout_of(ty).is_zst() { // Just zero out the stack slot. // If we store a zero constant, LLVM will drown in vreg allocation for large // data structures, and the generated code will be awful. (A telltale sign of @@ -194,8 +194,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { false, ty, llresult, - self.cx().const_u8(0), - self.cx().const_usize(1) + self.const_u8(0), + self.const_usize(1) ); } return; @@ -207,7 +207,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "needs_drop" => { let tp_ty = substs.type_at(0); - self.cx().const_bool(self.cx().type_needs_drop(tp_ty)) + self.const_bool(self.type_needs_drop(tp_ty)) } "offset" => { let ptr = args[0].immediate(); @@ -255,18 +255,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let tp_ty = substs.type_at(0); let mut ptr = args[0].immediate(); if let PassMode::Cast(ty) = fn_ty.ret.mode { - ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty.llvm_type(self.cx()))); + ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self))); } let load = self.volatile_load(ptr); let align = if name == "unaligned_volatile_load" { 1 } else { - self.cx().align_of(tp_ty).bytes() as u32 + self.align_of(tp_ty).bytes() as u32 }; unsafe { llvm::LLVMSetAlignment(load, align); } - to_immediate(self, load, self.cx().layout_of(tp_ty)) + to_immediate(self, load, self.layout_of(tp_ty)) }, "volatile_store" => { let dst = args[0].deref(self.cx()); @@ -280,7 +280,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { }, "prefetch_read_data" | "prefetch_write_data" | "prefetch_read_instruction" | "prefetch_write_instruction" => { - let expect = self.cx().get_intrinsic(&("llvm.prefetch")); + let expect = self.get_intrinsic(&("llvm.prefetch")); let (rw, cache_type) = match name { "prefetch_read_data" => (0, 1), "prefetch_write_data" => (1, 1), @@ -290,9 +290,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { }; self.call(expect, &[ args[0].immediate(), - self.cx().const_i32(rw), + self.const_i32(rw), args[1].immediate(), - self.cx().const_i32(cache_type) + self.const_i32(cache_type) ], None) }, "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | @@ -301,24 +301,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" | "rotate_left" | "rotate_right" => { let ty = arg_tys[0]; - match int_type_width_signed(ty, self.cx()) { + match int_type_width_signed(ty, self) { Some((width, signed)) => match name { "ctlz" | "cttz" => { - let y = self.cx().const_bool(false); - let llfn = self.cx().get_intrinsic( + let y = self.const_bool(false); + let llfn = self.get_intrinsic( &format!("llvm.{}.i{}", name, width), ); self.call(llfn, &[args[0].immediate(), y], None) } "ctlz_nonzero" | "cttz_nonzero" => { - let y = self.cx().const_bool(true); + let y = self.const_bool(true); let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); - let llfn = self.cx().get_intrinsic(llvm_name); + let llfn = self.get_intrinsic(llvm_name); self.call(llfn, &[args[0].immediate(), y], None) } "ctpop" => self.call( - self.cx().get_intrinsic(&format!("llvm.ctpop.i{}", width)), + self.get_intrinsic(&format!("llvm.ctpop.i{}", width)), &[args[0].immediate()], None ), @@ -327,7 +327,7 @@ impl 
IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { args[0].immediate() // byte swap a u8/i8 is just a no-op } else { self.call( - self.cx().get_intrinsic( + self.get_intrinsic( &format!("llvm.bswap.i{}", width), ), &[args[0].immediate()], @@ -337,7 +337,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } "bitreverse" => { self.call( - self.cx().get_intrinsic( + self.get_intrinsic( &format!("llvm.bitreverse.i{}", width), ), &[args[0].immediate()], @@ -348,7 +348,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let intrinsic = format!("llvm.{}{}.with.overflow.i{}", if signed { 's' } else { 'u' }, &name[..3], width); - let llfn = self.cx().get_intrinsic(&intrinsic); + let llfn = self.get_intrinsic(&intrinsic); // Convert `i1` to a `bool`, and write it to the out parameter let pair = self.call(llfn, &[ @@ -357,7 +357,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { ], None); let val = self.extract_value(pair, 0); let overflow = self.extract_value(pair, 1); - let overflow = self.zext(overflow, self.cx().type_bool()); + let overflow = self.zext(overflow, self.type_bool()); let dest = result.project_field(self, 0); self.store(val, dest.llval, dest.align); @@ -402,13 +402,13 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { // rotate = funnel shift with first two args the same let llvm_name = &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width); - let llfn = self.cx().get_intrinsic(llvm_name); + let llfn = self.get_intrinsic(llvm_name); self.call(llfn, &[val, val, raw_shift], None) } else { // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW)) // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW)) - let width = self.cx().const_uint( - self.cx().type_ix(width), + let width = self.const_uint( + self.type_ix(width), width, ); let shift = self.urem(raw_shift, width); @@ -496,16 +496,16 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { (SequentiallyConsistent, Monotonic), "failacq" if is_cxchg => (SequentiallyConsistent, Acquire), - _ => self.cx().sess().fatal("unknown ordering in atomic intrinsic") + _ => self.sess().fatal("unknown ordering in atomic intrinsic") }, 4 => match (split[2], split[3]) { ("acq", "failrelaxed") if is_cxchg => (Acquire, Monotonic), ("acqrel", "failrelaxed") if is_cxchg => (AcquireRelease, Monotonic), - _ => self.cx().sess().fatal("unknown ordering in atomic intrinsic") + _ => self.sess().fatal("unknown ordering in atomic intrinsic") }, - _ => self.cx().sess().fatal("Atomic intrinsic not in correct format"), + _ => self.sess().fatal("Atomic intrinsic not in correct format"), }; let invalid_monomorphization = |ty| { @@ -517,7 +517,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { match split[1] { "cxchg" | "cxchgweak" => { let ty = substs.type_at(0); - if int_type_width_signed(ty, self.cx()).is_some() { + if int_type_width_signed(ty, self).is_some() { let weak = split[1] == "cxchgweak"; let pair = self.atomic_cmpxchg( args[0].immediate(), @@ -528,7 +528,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { weak); let val = self.extract_value(pair, 0); let success = self.extract_value(pair, 1); - let success = self.zext(success, self.cx().type_bool()); + let success = self.zext(success, self.type_bool()); let dest = result.project_field(self, 0); self.store(val, dest.llval, dest.align); @@ -542,8 +542,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "load" => { let ty = substs.type_at(0); - if int_type_width_signed(ty, 
self.cx()).is_some() { - let size = self.cx().size_of(ty); + if int_type_width_signed(ty, self).is_some() { + let size = self.size_of(ty); self.atomic_load(args[0].immediate(), order, size) } else { return invalid_monomorphization(ty); @@ -552,8 +552,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "store" => { let ty = substs.type_at(0); - if int_type_width_signed(ty, self.cx()).is_some() { - let size = self.cx().size_of(ty); + if int_type_width_signed(ty, self).is_some() { + let size = self.size_of(ty); self.atomic_store( args[1].immediate(), args[0].immediate(), @@ -590,11 +590,11 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { "min" => AtomicRmwBinOp::AtomicMin, "umax" => AtomicRmwBinOp::AtomicUMax, "umin" => AtomicRmwBinOp::AtomicUMin, - _ => self.cx().sess().fatal("unknown atomic operation") + _ => self.sess().fatal("unknown atomic operation") }; let ty = substs.type_at(0); - if int_type_width_signed(ty, self.cx()).is_some() { + if int_type_width_signed(ty, self).is_some() { self.atomic_rmw( atom_op, args[0].immediate(), @@ -681,7 +681,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { // This assumes the type is "simple", i.e. no // destructors, and the contents are SIMD // etc. - assert!(!bx.cx().type_needs_drop(arg.layout.ty)); + assert!(!bx.type_needs_drop(arg.layout.ty)); let (ptr, align) = match arg.val { OperandValue::Ref(ptr, None, align) => (ptr, align), _ => bug!() @@ -693,21 +693,21 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { }).collect() } intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); - vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))] + let llvm_elem = one(ty_to_type(bx, llvm_elem)); + vec![bx.pointercast(arg.immediate(), bx.type_ptr_to(llvm_elem))] } intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); + let llvm_elem = one(ty_to_type(bx, llvm_elem)); vec![ bx.bitcast(arg.immediate(), - bx.cx().type_vector(llvm_elem, length as u64)) + bx.type_vector(llvm_elem, length as u64)) ] } intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { // the LLVM intrinsic uses a smaller integer // size than the C intrinsic's signature, so // we have to trim it down here. 
- vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))] + vec![bx.trunc(arg.immediate(), bx.type_ix(llvm_width as u64))] } _ => vec![arg.immediate()], } @@ -715,10 +715,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(self.cx(), t)) + .flat_map(|t| ty_to_type(self, t)) .collect::<Vec<_>>(); - let outputs = one(ty_to_type(self.cx(), &intr.output)); + let outputs = one(ty_to_type(self, &intr.output)); let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { modify_as_needed(self, t, arg) @@ -727,9 +727,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let val = match intr.definition { intrinsics::IntrinsicDef::Named(name) => { - let f = self.cx().declare_cfn( + let f = self.declare_cfn( name, - self.cx().type_func(&inputs, outputs), + self.type_func(&inputs, outputs), ); self.call(f, &llargs, None) } @@ -754,7 +754,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { if !fn_ty.ret.is_ignore() { if let PassMode::Cast(ty) = fn_ty.ret.mode { - let ptr_llty = self.cx().type_ptr_to(ty.llvm_type(self.cx())); + let ptr_llty = self.type_ptr_to(ty.llvm_type(self)); let ptr = self.pointercast(result.llval, ptr_llty); self.store(llval, ptr, result.align); } else { @@ -765,18 +765,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } fn abort(&mut self) { - let fnname = self.cx().get_intrinsic(&("llvm.trap")); + let fnname = self.get_intrinsic(&("llvm.trap")); self.call(fnname, &[], None); } fn assume(&mut self, val: Self::Value) { - let assume_intrinsic = self.cx().get_intrinsic("llvm.assume"); + let assume_intrinsic = self.get_intrinsic("llvm.assume"); self.call(assume_intrinsic, &[val], None); } fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value { - let expect = self.cx().get_intrinsic(&"llvm.expect.i1"); - self.call(expect, &[cond, self.cx().const_bool(expected)], None) + let expect = self.get_intrinsic(&"llvm.expect.i1"); + self.call(expect, &[cond, self.const_bool(expected)], None) } } @@ -789,8 +789,8 @@ fn copy_intrinsic( src: &'ll Value, count: &'ll Value, ) { - let (size, align) = bx.cx().size_and_align_of(ty); - let size = bx.mul(bx.cx().const_usize(size.bytes()), count); + let (size, align) = bx.size_and_align_of(ty); + let size = bx.mul(bx.const_usize(size.bytes()), count); let flags = if volatile { MemFlags::VOLATILE } else { @@ -811,8 +811,8 @@ fn memset_intrinsic( val: &'ll Value, count: &'ll Value ) { - let (size, align) = bx.cx().size_and_align_of(ty); - let size = bx.mul(bx.cx().const_usize(size.bytes()), count); + let (size, align) = bx.size_and_align_of(ty); + let size = bx.mul(bx.const_usize(size.bytes()), count); let flags = if volatile { MemFlags::VOLATILE } else { @@ -828,11 +828,11 @@ fn try_intrinsic( local_ptr: &'ll Value, dest: &'ll Value, ) { - if bx.cx().sess().no_landing_pads() { + if bx.sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align.abi; - bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align); - } else if wants_msvc_seh(bx.cx().sess()) { + bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align); + } else if wants_msvc_seh(bx.sess()) { codegen_msvc_try(bx, func, data, local_ptr, dest); } else { codegen_gnu_try(bx, func, data, local_ptr, dest); @@ -853,8 +853,8 @@ fn codegen_msvc_try( local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| { - bx.set_personality_fn(bx.cx().eh_personality()); + 
let llfn = get_rust_try_fn(bx, &mut |mut bx| { + bx.set_personality_fn(bx.eh_personality()); let mut normal = bx.build_sibling_block("normal"); let mut catchswitch = bx.build_sibling_block("catchswitch"); @@ -904,12 +904,12 @@ fn codegen_msvc_try( // } // // More information can be found in libstd's seh.rs implementation. - let i64p = bx.cx().type_ptr_to(bx.cx().type_i64()); + let i64p = bx.type_ptr_to(bx.type_i64()); let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); - normal.ret(bx.cx().const_i32(0)); + normal.ret(bx.const_i32(0)); let cs = catchswitch.catch_switch(None, None, 1); catchswitch.add_handler(cs, catchpad.llbb()); @@ -918,12 +918,12 @@ fn codegen_msvc_try( Some(did) => bx.cx().get_static(did), None => bug!("msvc_try_filter not defined"), }; - let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]); + let funclet = catchpad.catch_pad(cs, &[tydesc, bx.const_i32(0), slot]); let addr = catchpad.load(slot, ptr_align); let i64_align = bx.tcx().data_layout.i64_align.abi; let arg1 = catchpad.load(addr, i64_align); - let val1 = bx.cx().const_i32(1); + let val1 = bx.const_i32(1); let gep1 = catchpad.inbounds_gep(addr, &[val1]); let arg2 = catchpad.load(gep1, i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); @@ -932,7 +932,7 @@ fn codegen_msvc_try( catchpad.store(arg2, gep2, i64_align); catchpad.catch_ret(&funclet, caught.llbb()); - caught.ret(bx.cx().const_i32(1)); + caught.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -960,7 +960,7 @@ fn codegen_gnu_try( local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| { + let llfn = get_rust_try_fn(bx, &mut |mut bx| { // Codegens the shims described above: // // bx: @@ -985,7 +985,7 @@ fn codegen_gnu_try( let data = llvm::get_param(bx.llfn(), 1); let local_ptr = llvm::get_param(bx.llfn(), 2); bx.invoke(func, &[data], then.llbb(), catch.llbb(), None); - then.ret(bx.cx().const_i32(0)); + then.ret(bx.const_i32(0)); // Type indicator for the exception being thrown. // @@ -993,14 +993,14 @@ fn codegen_gnu_try( // being thrown. The second value is a "selector" indicating which of // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. 
- let lpad_ty = bx.cx().type_struct(&[bx.cx().type_i8p(), bx.cx().type_i32()], false); - let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1); - catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p())); + let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1); + catch.add_clause(vals, bx.const_null(bx.type_i8p())); let ptr = catch.extract_value(vals, 0); let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p())); + let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p())); catch.store(ptr, bitcast, ptr_align); - catch.ret(bx.cx().const_i32(1)); + catch.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -1081,7 +1081,7 @@ fn generic_simd_intrinsic( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bx.cx().sess(), span, + bx.sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); } @@ -1142,7 +1142,7 @@ fn generic_simd_intrinsic( found `{}` with length {}", in_len, in_ty, ret_ty, out_len); - require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer, + require!(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer, "expected return type with integer elements, found `{}` with non-integer `{}`", ret_ty, ret_ty.simd_type(tcx)); @@ -1178,8 +1178,8 @@ fn generic_simd_intrinsic( let indices: Option<Vec<_>> = (0..n) .map(|i| { let arg_idx = i; - let val = bx.cx().const_get_elt(vector, i as u64); - match bx.cx().const_to_opt_u128(val, true) { + let val = bx.const_get_elt(vector, i as u64); + match bx.const_to_opt_u128(val, true) { None => { emit_error!("shuffle index #{} is not a constant", arg_idx); None @@ -1189,18 +1189,18 @@ fn generic_simd_intrinsic( arg_idx, total_len); None } - Some(idx) => Some(bx.cx().const_i32(idx as i32)), + Some(idx) => Some(bx.const_i32(idx as i32)), } }) .collect(); let indices = match indices { Some(i) => i, - None => return Ok(bx.cx().const_null(llret_ty)) + None => return Ok(bx.const_null(llret_ty)) }; return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), - bx.cx().const_vector(&indices))) + bx.const_vector(&indices))) } if name == "simd_insert" { @@ -1231,8 +1231,8 @@ fn generic_simd_intrinsic( _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty) } // truncate the mask to a vector of i1s - let i1 = bx.cx().type_i1(); - let i1xn = bx.cx().type_vector(i1, m_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, m_len as u64); let m_i1s = bx.trunc(args[0].immediate(), i1xn); return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate())); } @@ -1252,7 +1252,7 @@ fn generic_simd_intrinsic( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bx.cx().sess(), span, + bx.sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); } @@ -1293,7 +1293,7 @@ fn generic_simd_intrinsic( }; let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety); - let intrinsic = bx.cx().get_intrinsic(&llvm_name); + let intrinsic = bx.get_intrinsic(&llvm_name); let c = bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None); @@ -1450,28 +1450,28 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = bx.cx().type_i32(); - let alignment = 
bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32); + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = bx.cx().type_i1(); - let i1xn = bx.cx().type_vector(i1, in_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = bx.cx().declare_cfn(&llvm_intrinsic, - bx.cx().type_func(&[ + let f = bx.declare_cfn(&llvm_intrinsic, + bx.type_func(&[ llvm_pointer_vec_ty, alignment_ty, mask_ty, @@ -1550,30 +1550,30 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = bx.cx().type_i32(); - let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32); + let alignment_ty = bx.type_i32(); + let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = bx.cx().type_i1(); - let i1xn = bx.cx().type_vector(i1, in_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; - let ret_t = bx.cx().type_void(); + let ret_t = bx.type_void(); // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = bx.cx().declare_cfn(&llvm_intrinsic, - bx.cx().type_func(&[llvm_elem_vec_ty, + let f = bx.declare_cfn(&llvm_intrinsic, + bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1613,7 +1613,7 @@ fn generic_simd_intrinsic( // code is generated // * if the accumulator of the fmul isn't 1, incorrect // code is generated - match bx.cx().const_get_real(acc) { + match bx.const_get_real(acc) { None => return_error!("accumulator of {} is not a constant", $name), Some((v, loses_info)) => { if $name.contains("mul") && v != 1.0_f64 { @@ -1629,8 +1629,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => bx.cx().const_undef(bx.cx().type_f32()), - 64 => bx.cx().const_undef(bx.cx().type_f64()), + 32 => bx.const_undef(bx.type_f32()), + 64 => bx.const_undef(bx.type_f64()), v => { 
return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, @@ -1707,8 +1707,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, } // boolean reductions operate on vectors of i1s: - let i1 = bx.cx().type_i1(); - let i1xn = bx.cx().type_vector(i1, in_len as u64); + let i1 = bx.type_i1(); + let i1xn = bx.type_vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { @@ -1718,7 +1718,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, if !$boolean { r } else { - bx.zext(r, bx.cx().type_bool()) + bx.zext(r, bx.type_bool()) } ) }, |
