Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
28 files changed, 1089 insertions, 683 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 854e3ccc21b..dca9c1f04d3 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -11,11 +11,12 @@ use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::MemFlags; use rustc_middle::bug; +use rustc_middle::ty::layout::LayoutOf; pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; use rustc_middle::ty::Ty; use rustc_target::abi::call::ArgAbi; pub use rustc_target::abi::call::*; -use rustc_target::abi::{self, HasDataLayout, Int, LayoutOf}; +use rustc_target::abi::{self, HasDataLayout, Int}; pub use rustc_target::spec::abi::Abi; use libc::c_uint; @@ -353,7 +354,11 @@ pub trait FnAbiLlvmExt<'tcx> { impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { - let args_capacity: usize = self.args.iter().map(|arg| + // Ignore "extra" args from the call site for C variadic functions. + // Only the "fixed" args are part of the LLVM function signature. + let args = if self.c_variadic { &self.args[..self.fixed_count] } else { &self.args }; + + let args_capacity: usize = args.iter().map(|arg| if arg.pad.is_some() { 1 } else { 0 } + if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 } ).sum(); @@ -371,7 +376,7 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { } }; - for arg in &self.args { + for arg in args { // add padding if let Some(ty) = arg.pad { llargument_tys.push(ty.llvm_type(cx)); @@ -506,7 +511,12 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { } fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { - // FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite. + if self.ret.layout.abi.is_uninhabited() { + llvm::Attribute::NoReturn.apply_callsite(llvm::AttributePlace::Function, callsite); + } + if !self.can_unwind { + llvm::Attribute::NoUnwind.apply_callsite(llvm::AttributePlace::Function, callsite); + } let mut i = 0; let mut apply = |cx: &CodegenCx<'_, '_>, attrs: &ArgAttributes| { @@ -516,7 +526,7 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { }; match self.ret.mode { PassMode::Direct(ref attrs) => { - attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, &bx.cx, callsite); + attrs.apply_attrs_to_callsite(llvm::AttributePlace::ReturnValue, bx.cx, callsite); } PassMode::Indirect { ref attrs, extra_attrs: _, on_stack } => { assert!(!on_stack); @@ -531,16 +541,13 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> { } _ => {} } - if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi { + if let abi::Abi::Scalar(scalar) = self.ret.layout.abi { // If the value is a boolean, the range is 0..2 and that ultimately // become 0..0 when the type becomes i1, which would be rejected // by the LLVM verifier. if let Int(..) 
= scalar.value { - if !scalar.is_bool() { - let range = scalar.valid_range_exclusive(bx); - if range.start != range.end { - bx.range_metadata(callsite, range); - } + if !scalar.is_bool() && !scalar.is_always_valid(bx) { + bx.range_metadata(callsite, scalar.valid_range); } } } diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 068e5e99efe..30d91b41a8e 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -3,19 +3,22 @@ use libc::c_uint; use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS}; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; +use rustc_session::config::DebugInfo; use rustc_span::symbol::sym; +use crate::debuginfo; use crate::llvm::{self, False, True}; use crate::ModuleLlvm; pub(crate) unsafe fn codegen( tcx: TyCtxt<'_>, - mods: &mut ModuleLlvm, + module_llvm: &mut ModuleLlvm, + module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool, ) { - let llcx = &*mods.llcx; - let llmod = mods.llmod(); + let llcx = &*module_llvm.llcx; + let llmod = module_llvm.llmod(); let usize = match tcx.sess.target.pointer_width { 16 => llvm::LLVMInt16TypeInContext(llcx), 32 => llvm::LLVMInt32TypeInContext(llcx), @@ -78,8 +81,14 @@ pub(crate) unsafe fn codegen( .enumerate() .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint)) .collect::<Vec<_>>(); - let ret = - llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None); + let ret = llvm::LLVMRustBuildCall( + llbuilder, + ty, + callee, + args.as_ptr(), + args.len() as c_uint, + None, + ); llvm::LLVMSetTailCall(ret, True); if output.is_some() { llvm::LLVMBuildRet(llbuilder, ret); @@ -121,8 +130,15 @@ pub(crate) unsafe fn codegen( .enumerate() .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint)) .collect::<Vec<_>>(); - let ret = llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None); + let ret = + llvm::LLVMRustBuildCall(llbuilder, ty, callee, args.as_ptr(), args.len() as c_uint, None); llvm::LLVMSetTailCall(ret, True); llvm::LLVMBuildRetVoid(llbuilder); llvm::LLVMDisposeBuilder(llbuilder); + + if tcx.sess.opts.debuginfo != DebugInfo::None { + let dbg_cx = debuginfo::CrateDebugContext::new(llmod); + debuginfo::metadata::compile_unit_metadata(tcx, module_name, &dbg_cx); + dbg_cx.finalize(tcx.sess); + } } diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 7bd9397d649..341a8882416 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -105,7 +105,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { let r = r.unwrap(); // Again, based on how many outputs we have - let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); + let outputs = ia.outputs.iter().zip(&outputs).filter(|&(o, _)| !o.is_indirect); for (i, (_, &place)) in outputs.enumerate() { let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) }; OperandValue::Immediate(v).store(self, place); @@ -302,11 +302,19 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { "~{flags}".to_string(), ]); } - InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {} + InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => { + constraints.extend_from_slice(&[ + "~{vtype}".to_string(), + "~{vl}".to_string(), + "~{vxsat}".to_string(), + "~{vxrm}".to_string(), + ]); + } InlineAsmArch::Nvptx64 => {} InlineAsmArch::PowerPC | InlineAsmArch::PowerPC64 => {} 
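// A note on the RiscV arms above: the vector configuration/state CSRs
// (vtype, vl, vxsat, vxrm) can be altered by vector instructions and are not
// modeled by LLVM as ordinary allocatable registers, so `asm!` on RISC-V now
// conservatively treats them as clobbered. An illustrative use, assuming a
// riscv target with the V extension enabled: after this change the compiler
// no longer assumes vl/vtype survive across the asm block.
//
//     unsafe { core::arch::asm!("vsetvli {0}, {1}, e8, m1, ta, ma", out(reg) _, in(reg) 16usize) };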
InlineAsmArch::Hexagon => {} InlineAsmArch::Mips | InlineAsmArch::Mips64 => {} + InlineAsmArch::S390x => {} InlineAsmArch::SpirV => {} InlineAsmArch::Wasm32 => {} InlineAsmArch::Bpf => {} @@ -323,7 +331,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { let output_type = match &output_types[..] { [] => self.type_void(), [ty] => ty, - tys => self.type_struct(&tys, false), + tys => self.type_struct(tys, false), }; let dialect = match asm_arch { InlineAsmArch::X86 | InlineAsmArch::X86_64 @@ -425,7 +433,7 @@ impl AsmMethods for CodegenCx<'ll, 'tcx> { } } -fn inline_asm_call( +pub(crate) fn inline_asm_call( bx: &mut Builder<'a, 'll, 'tcx>, asm: &str, cons: &str, @@ -464,7 +472,7 @@ fn inline_asm_call( alignstack, llvm::AsmDialect::from_generic(dia), ); - let call = bx.call(v, inputs, None); + let call = bx.call(fty, v, inputs, None); // Store mark in a metadata node so we can map LLVM errors // back to source locations. See #17552. @@ -608,6 +616,10 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r", InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b", InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f", + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + unreachable!("clobber-only") + } InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r", InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f", InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { @@ -626,6 +638,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r", InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r", InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r", + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f", InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } @@ -704,6 +718,7 @@ fn modifier_to_llvm( } InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None, InlineAsmRegClass::Bpf(_) => None, + InlineAsmRegClass::S390x(_) => None, InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } @@ -744,6 +759,10 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(), InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(), + InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) + | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + unreachable!("clobber-only") + } InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(), InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { @@ -762,6 +781,8 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(), InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(), InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(), + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => cx.type_i32(), + InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => 
cx.type_f64(), InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { bug!("LLVM backend does not support SPIR-V") } @@ -771,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll /// Helper function to get the LLVM type for a Scalar. Pointers are returned as /// the equivalent integer type. -fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type { +fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type { match scalar.value { Primitive::Int(Integer::I8, _) => cx.type_i8(), Primitive::Int(Integer::I16, _) => cx.type_i16(), @@ -791,7 +812,7 @@ fn llvm_fixup_input( reg: InlineAsmRegClass, layout: &TyAndLayout<'tcx>, ) -> &'ll Value { - match (reg, &layout.abi) { + match (reg, layout.abi) { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.value { let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8); @@ -814,7 +835,7 @@ fn llvm_fixup_input( Abi::Vector { element, count }, ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); - let vec_ty = bx.cx.type_vector(elem_ty, *count); + let vec_ty = bx.cx.type_vector(elem_ty, count); let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } @@ -869,7 +890,7 @@ fn llvm_fixup_output( reg: InlineAsmRegClass, layout: &TyAndLayout<'tcx>, ) -> &'ll Value { - match (reg, &layout.abi) { + match (reg, layout.abi) { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.value { bx.extract_element(value, bx.const_i32(0)) @@ -889,8 +910,8 @@ fn llvm_fixup_output( Abi::Vector { element, count }, ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); - let vec_ty = bx.cx.type_vector(elem_ty, *count * 2); - let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect(); + let vec_ty = bx.cx.type_vector(elem_ty, count * 2); + let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) @@ -944,7 +965,7 @@ fn llvm_fixup_output_type( reg: InlineAsmRegClass, layout: &TyAndLayout<'tcx>, ) -> &'ll Type { - match (reg, &layout.abi) { + match (reg, layout.abi) { (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.value { cx.type_vector(cx.type_i8(), 8) diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index 56b93f83466..51c70f0868f 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -305,9 +305,12 @@ pub fn from_fn_attrs(cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty:: let mut function_features = codegen_fn_attrs .target_features .iter() - .map(|f| { + .flat_map(|f| { let feature = &f.as_str(); - format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature)) + llvm_util::to_llvm_feature(cx.tcx.sess, feature) + .into_iter() + .map(|f| format!("+{}", f)) + .collect::<Vec<String>>() }) .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x { InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(), diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs 
b/compiler/rustc_codegen_llvm/src/back/archive.rs index 6ac7093b7de..4e86946219f 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -9,18 +9,15 @@ use std::str; use crate::llvm::archive_ro::{ArchiveRO, Child}; use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport}; -use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder}; -use rustc_codegen_ssa::{looks_like_rust_object_file, METADATA_FILENAME}; +use rustc_codegen_ssa::back::archive::ArchiveBuilder; use rustc_data_structures::temp_dir::MaybeTempDir; use rustc_middle::middle::cstore::{DllCallingConvention, DllImport}; use rustc_session::Session; -use rustc_span::symbol::Symbol; struct ArchiveConfig<'a> { pub sess: &'a Session, pub dst: PathBuf, pub src: Option<PathBuf>, - pub lib_search_paths: Vec<PathBuf>, } /// Helper for adding many files to an archive. @@ -54,13 +51,7 @@ fn is_relevant_child(c: &Child<'_>) -> bool { } fn archive_config<'a>(sess: &'a Session, output: &Path, input: Option<&Path>) -> ArchiveConfig<'a> { - use rustc_codegen_ssa::back::link::archive_search_paths; - ArchiveConfig { - sess, - dst: output.to_path_buf(), - src: input.map(|p| p.to_path_buf()), - lib_search_paths: archive_search_paths(sess), - } + ArchiveConfig { sess, dst: output.to_path_buf(), src: input.map(|p| p.to_path_buf()) } } /// Map machine type strings to values of LLVM's MachineTypes enum. @@ -111,57 +102,23 @@ impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> { .collect() } - /// Adds all of the contents of a native library to this archive. This will - /// search in the relevant locations for a library named `name`. - fn add_native_library(&mut self, name: Symbol, verbatim: bool) { - let location = - find_library(name, verbatim, &self.config.lib_search_paths, self.config.sess); - self.add_archive(&location, |_| false).unwrap_or_else(|e| { - self.config.sess.fatal(&format!( - "failed to add native library {}: {}", - location.to_string_lossy(), - e - )); + fn add_archive<F>(&mut self, archive: &Path, skip: F) -> io::Result<()> + where + F: FnMut(&str) -> bool + 'static, + { + let archive_ro = match ArchiveRO::open(archive) { + Ok(ar) => ar, + Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), + }; + if self.additions.iter().any(|ar| ar.path() == archive) { + return Ok(()); + } + self.additions.push(Addition::Archive { + path: archive.to_path_buf(), + archive: archive_ro, + skip: Box::new(skip), }); - } - - /// Adds all of the contents of the rlib at the specified path to this - /// archive. - /// - /// This ignores adding the bytecode from the rlib, and if LTO is enabled - /// then the object file also isn't added. - fn add_rlib( - &mut self, - rlib: &Path, - name: &str, - lto: bool, - skip_objects: bool, - ) -> io::Result<()> { - // Ignoring obj file starting with the crate name - // as simple comparison is not enough - there - // might be also an extra name suffix - let obj_start = name.to_owned(); - - self.add_archive(rlib, move |fname: &str| { - // Ignore metadata files, no matter the name. 
- if fname == METADATA_FILENAME { - return true; - } - - // Don't include Rust objects if LTO is enabled - if lto && looks_like_rust_object_file(fname) { - return true; - } - - // Otherwise if this is *not* a rust object and we're skipping - // objects then skip this file - if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) { - return true; - } - - // ok, don't skip this - false - }) + Ok(()) } /// Adds an arbitrary file to this archive @@ -270,25 +227,6 @@ impl<'a> LlvmArchiveBuilder<'a> { self.src_archive.as_ref().unwrap().as_ref() } - fn add_archive<F>(&mut self, archive: &Path, skip: F) -> io::Result<()> - where - F: FnMut(&str) -> bool + 'static, - { - let archive_ro = match ArchiveRO::open(archive) { - Ok(ar) => ar, - Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), - }; - if self.additions.iter().any(|ar| ar.path() == archive) { - return Ok(()); - } - self.additions.push(Addition::Archive { - path: archive.to_path_buf(), - archive: archive_ro, - skip: Box::new(skip), - }); - Ok(()) - } - fn llvm_archive_kind(&self) -> Result<ArchiveKind, &str> { let kind = &*self.config.sess.target.archive_format; kind.parse().map_err(|_| kind) diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index f612785e5a4..e2b33509b40 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -109,7 +109,7 @@ fn prepare_lto( .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter)); } - let archive = ArchiveRO::open(&path).expect("wanted an rlib"); + let archive = ArchiveRO::open(path).expect("wanted an rlib"); let obj_files = archive .iter() .filter_map(|child| child.ok().and_then(|c| c.name().map(|name| (name, c)))) @@ -316,14 +316,28 @@ fn fat_lto( .generic_activity_with_arg("LLVM_fat_lto_link_module", format!("{:?}", name)); info!("linking {:?}", name); let data = bc_decoded.data(); - linker.add(&data).map_err(|()| { + linker.add(data).map_err(|()| { let msg = format!("failed to load bc of {:?}", name); - write::llvm_err(&diag_handler, &msg) + write::llvm_err(diag_handler, &msg) })?; serialized_bitcode.push(bc_decoded); } drop(linker); - save_temp_bitcode(&cgcx, &module, "lto.input"); + save_temp_bitcode(cgcx, &module, "lto.input"); + + // Fat LTO also suffers from the invalid DWARF issue similar to Thin LTO. + // Here we rewrite all `DICompileUnit` pointers if there is only one `DICompileUnit`. + // This only works around the problem when codegen-units = 1. + // Refer to the comments in the `optimize_thin_module` function for more details. + let mut cu1 = ptr::null_mut(); + let mut cu2 = ptr::null_mut(); + unsafe { llvm::LLVMRustLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2) }; + if !cu2.is_null() { + let _timer = + cgcx.prof.generic_activity_with_arg("LLVM_fat_lto_patch_debuginfo", &*module.name); + unsafe { llvm::LLVMRustLTOPatchDICompileUnit(llmod, cu1) }; + save_temp_bitcode(cgcx, &module, "fat-lto-after-patch"); + } // Internalize everything below threshold to help strip out more modules and such. 
unsafe { @@ -333,14 +347,14 @@ fn fat_lto( ptr as *const *const libc::c_char, symbols_below_threshold.len() as libc::size_t, ); - save_temp_bitcode(&cgcx, &module, "lto.after-restriction"); + save_temp_bitcode(cgcx, &module, "lto.after-restriction"); } if cgcx.no_landing_pads { unsafe { llvm::LLVMRustMarkAllFunctionsNounwind(llmod); } - save_temp_bitcode(&cgcx, &module, "lto.after-nounwind"); + save_temp_bitcode(cgcx, &module, "lto.after-nounwind"); } } @@ -484,7 +498,7 @@ fn thin_lto( symbols_below_threshold.as_ptr(), symbols_below_threshold.len() as u32, ) - .ok_or_else(|| write::llvm_err(&diag_handler, "failed to prepare thin LTO context"))?; + .ok_or_else(|| write::llvm_err(diag_handler, "failed to prepare thin LTO context"))?; let data = ThinData(data); @@ -558,7 +572,7 @@ fn thin_lto( if let Some(path) = key_map_path { if let Err(err) = curr_key_map.save_to_file(&path) { let msg = format!("Error while writing ThinLTO key data: {}", err); - return Err(write::llvm_err(&diag_handler, &msg)); + return Err(write::llvm_err(diag_handler, &msg)); } } @@ -730,8 +744,7 @@ pub unsafe fn optimize_thin_module( // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = - parse_module(llcx, &module_name, thin_module.data(), &diag_handler)? as *const _; + let llmod_raw = parse_module(llcx, module_name, thin_module.data(), &diag_handler)? as *const _; let module = ModuleCodegen { module_llvm: ModuleLlvm { llmod_raw, llcx, tm }, name: thin_module.name().to_string(), @@ -740,7 +753,7 @@ pub unsafe fn optimize_thin_module( { let target = &*module.module_llvm.tm; let llmod = module.module_llvm.llmod(); - save_temp_bitcode(&cgcx, &module, "thin-lto-input"); + save_temp_bitcode(cgcx, &module, "thin-lto-input"); // Before we do much else find the "main" `DICompileUnit` that we'll be // using below. If we find more than one though then rustc has changed @@ -748,7 +761,7 @@ pub unsafe fn optimize_thin_module( // an error. let mut cu1 = ptr::null_mut(); let mut cu2 = ptr::null_mut(); - llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); + llvm::LLVMRustLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); if !cu2.is_null() { let msg = "multiple source DICompileUnits found"; return Err(write::llvm_err(&diag_handler, msg)); @@ -761,7 +774,7 @@ pub unsafe fn optimize_thin_module( .prof .generic_activity_with_arg("LLVM_thin_lto_remove_landing_pads", thin_module.name()); llvm::LLVMRustMarkAllFunctionsNounwind(llmod); - save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind"); + save_temp_bitcode(cgcx, &module, "thin-lto-after-nounwind"); } // Up next comes the per-module local analyses that we do for Thin LTO. 
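Both LTO paths now share the invalid-DWARF workaround, which is why the FFI hooks lose the `Thin` prefix (`LLVMRustThinLTOGetDICompileUnit` becomes `LLVMRustLTOGetDICompileUnit`, and likewise for `PatchDICompileUnit`). A sketch of the declarations as implied by the call sites above; treat the exact types as assumptions, since the real bindings live in this crate's `llvm` FFI module:

    extern "C" {
        // Reports up to two distinct DICompileUnits found in the module.
        fn LLVMRustLTOGetDICompileUnit(m: &Module, cu1: &mut *mut c_void, cu2: &mut *mut c_void);
        // Points all DICompileUnit references in the module at `cu`.
        fn LLVMRustLTOPatchDICompileUnit(m: &Module, cu: *mut c_void);
    }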
@@ -847,7 +860,7 @@ pub unsafe fn optimize_thin_module( let _timer = cgcx .prof .generic_activity_with_arg("LLVM_thin_lto_patch_debuginfo", thin_module.name()); - llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); + llvm::LLVMRustLTOPatchDICompileUnit(llmod, cu1); save_temp_bitcode(cgcx, &module, "thin-lto-after-patch"); } @@ -933,7 +946,7 @@ pub fn parse_module<'a>( llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()).ok_or_else( || { let msg = "failed to parse bitcode for LTO module"; - write::llvm_err(&diag_handler, msg) + write::llvm_err(diag_handler, msg) }, ) } diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 5b4a187a1d5..ab48c56a626 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -41,7 +41,7 @@ use std::sync::Arc; pub fn llvm_err(handler: &rustc_errors::Handler, msg: &str) -> FatalError { match llvm::last_error() { Some(err) => handler.fatal(&format!("{}: {}", msg, err)), - None => handler.fatal(&msg), + None => handler.fatal(msg), } } @@ -96,7 +96,7 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> &'static mut ll None }; let config = TargetMachineFactoryConfig { split_dwarf_file }; - target_machine_factory(&tcx.sess, tcx.backend_optimization_level(()))(config) + target_machine_factory(tcx.sess, tcx.backend_optimization_level(()))(config) .unwrap_or_else(|err| llvm_err(tcx.sess.diagnostic(), &err).raise()) } @@ -129,7 +129,8 @@ fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel { match relocation_model { RelocModel::Static => llvm::RelocModel::Static, - RelocModel::Pic => llvm::RelocModel::PIC, + // LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra attribute. + RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC, RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic, RelocModel::Ropi => llvm::RelocModel::ROPI, RelocModel::Rwpi => llvm::RelocModel::RWPI, @@ -296,39 +297,8 @@ unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, user: *const c_void } let (cgcx, _) = *(user as *const (&CodegenContext<LlvmCodegenBackend>, &Handler)); - // Recover the post-substitution assembly code from LLVM for better - // diagnostics. 
- let mut have_source = false; - let mut buffer = String::new(); - let mut level = llvm::DiagnosticLevel::Error; - let mut loc = 0; - let mut ranges = [0; 8]; - let mut num_ranges = ranges.len() / 2; - let msg = llvm::build_string(|msg| { - buffer = llvm::build_string(|buffer| { - have_source = llvm::LLVMRustUnpackSMDiagnostic( - diag, - msg, - buffer, - &mut level, - &mut loc, - ranges.as_mut_ptr(), - &mut num_ranges, - ); - }) - .expect("non-UTF8 inline asm"); - }) - .expect("non-UTF8 SMDiagnostic"); - - let source = have_source.then(|| { - let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)]; - for i in 0..num_ranges { - spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize)); - } - (buffer, spans) - }); - - report_inline_asm(cgcx, msg, level, cookie, source); + let smdiag = llvm::diagnostic::SrcMgrDiagnostic::unpack(diag); + report_inline_asm(cgcx, smdiag.message, smdiag.level, cookie, smdiag.source); } unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void) { @@ -339,13 +309,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { - report_inline_asm( - cgcx, - llvm::twine_to_string(inline.message), - inline.level, - inline.cookie, - None, - ); + report_inline_asm(cgcx, inline.message, inline.level, inline.cookie, inline.source); } llvm::diagnostic::Optimization(opt) => { @@ -407,8 +371,9 @@ fn get_pgo_use_path(config: &ModuleConfig) -> Option<CString> { } pub(crate) fn should_use_new_llvm_pass_manager(config: &ModuleConfig) -> bool { - // The new pass manager is disabled by default. - config.new_llvm_pass_manager.unwrap_or(false) + // The new pass manager is enabled by default for LLVM >= 13. + // This matches Clang, which also enables it since Clang 13. + config.new_llvm_pass_manager.unwrap_or_else(|| llvm_util::get_version() >= (13, 0, 0)) } pub(crate) unsafe fn optimize_with_new_llvm_pass_manager( @@ -441,13 +406,15 @@ pub(crate) unsafe fn optimize_with_new_llvm_pass_manager( None }; - let llvm_selfprofiler = if cgcx.prof.llvm_recording_enabled() { - let mut llvm_profiler = LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap()); - &mut llvm_profiler as *mut _ as *mut c_void + let mut llvm_profiler = if cgcx.prof.llvm_recording_enabled() { + Some(LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap())) } else { - std::ptr::null_mut() + None }; + let llvm_selfprofiler = + llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut()); + let extra_passes = config.passes.join(","); // FIXME: NewPM doesn't provide a facility to pass custom InlineParams. 
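Two of the hunks above deserve a closer look. The new-pass-manager default now tracks the LLVM version; a free-standing model of the decision, with `explicit` standing in for the `-Z new-llvm-pass-manager` override and the triple for `llvm_util::get_version()`:

    fn use_new_pass_manager(explicit: Option<bool>, llvm_version: (u32, u32, u32)) -> bool {
        // An explicit flag always wins; otherwise follow LLVM >= 13, matching Clang.
        explicit.unwrap_or(llvm_version >= (13, 0, 0))
    }

The self-profiler change, meanwhile, is a soundness fix rather than a cleanup: the old code returned `&mut llvm_profiler as *mut _` out of the `if` arm that owned `llvm_profiler`, so the raw pointer dangled as soon as the arm ended. Holding the owner in an outer `Option` and borrowing from it keeps the pointee alive for the whole optimization run.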
@@ -591,7 +558,7 @@ pub(crate) unsafe fn optimize( let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal || (cgcx.lto != Lto::Fat && cgcx.opts.cg.linker_plugin_lto.enabled()); - with_llvm_pmb(llmod, &config, opt_level, prepare_for_thin_lto, &mut |b| { + with_llvm_pmb(llmod, config, opt_level, prepare_for_thin_lto, &mut |b| { llvm::LLVMRustAddLastExtensionPasses( b, extra_passes.as_ptr(), @@ -693,9 +660,9 @@ pub(crate) fn link( let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", format!("{:?}", module.name)); let buffer = ModuleBuffer::new(module.module_llvm.llmod()); - linker.add(&buffer.data()).map_err(|()| { + linker.add(buffer.data()).map_err(|()| { let msg = format!("failed to serialize module {:?}", module.name); - llvm_err(&diag_handler, &msg) + llvm_err(diag_handler, &msg) })?; } drop(linker); diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index cc3cbea4def..3026c2fa030 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -25,9 +25,9 @@ use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; -use rustc_middle::middle::cstore::EncodedMetadata; use rustc_middle::middle::exported_symbols; use rustc_middle::mir::mono::{Linkage, Visibility}; use rustc_middle::ty::TyCtxt; @@ -64,7 +64,7 @@ pub fn write_compressed_metadata<'tcx>( let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod()); let mut compressed = rustc_metadata::METADATA_HEADER.to_vec(); - FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap(); + FrameEncoder::new(&mut compressed).write_all(metadata.raw_data()).unwrap(); let llmeta = common::bytes_in_context(metadata_llcx, &compressed); let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false); @@ -157,16 +157,18 @@ pub fn compile_codegen_unit( } // Finalize code coverage by injecting the coverage map. Note, the coverage map will - // also be added to the `llvm.used` variable, created next. + // also be added to the `llvm.compiler.used` variable, created next. if cx.sess().instrument_coverage() { cx.coverageinfo_finalize(); } - // Create the llvm.used variable - // This variable has type [N x i8*] and is stored in the llvm.metadata section + // Create the llvm.used and llvm.compiler.used variables. 
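// In short: a symbol in `llvm.used` must survive both LLVM and (on ELF, via
// the SHF_GNU_RETAIN mechanism referenced in the consts.rs hunk below) the
// linker, while `llvm.compiler.used` only stops LLVM itself from discarding
// it. The two lists are populated through separate methods:
//
//     cx.add_used_global(g);          // ends up in @llvm.used
//     cx.add_compiler_used_global(g); // ends up in @llvm.compiler.used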
if !cx.used_statics().borrow().is_empty() { cx.create_used_variable() } + if !cx.compiler_used_statics().borrow().is_empty() { + cx.create_compiler_used_variable() + } // Finalize debuginfo if cx.sess().opts.debuginfo != DebugInfo::None { diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 5675a5d9812..9f7b8616d78 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -15,15 +15,17 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::MemFlags; use rustc_data_structures::small_c_str::SmallCStr; use rustc_hir::def_id::DefId; -use rustc_middle::ty::layout::TyAndLayout; +use rustc_middle::ty::layout::{ + FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout, +}; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::Span; -use rustc_target::abi::{self, Align, Size}; +use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange}; use rustc_target::spec::{HasTargetSpec, Target}; use std::borrow::Cow; use std::ffi::CStr; use std::iter; -use std::ops::{Deref, Range}; +use std::ops::Deref; use std::ptr; use tracing::debug; @@ -84,16 +86,30 @@ impl ty::layout::HasParamEnv<'tcx> for Builder<'_, '_, 'tcx> { impl HasTargetSpec for Builder<'_, '_, 'tcx> { #[inline] fn target_spec(&self) -> &Target { - &self.cx.target_spec() + self.cx.target_spec() } } -impl abi::LayoutOf for Builder<'_, '_, 'tcx> { - type Ty = Ty<'tcx>; - type TyAndLayout = TyAndLayout<'tcx>; +impl LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { + type LayoutOfResult = TyAndLayout<'tcx>; - fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout { - self.cx.layout_of(ty) + #[inline] + fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { + self.cx.handle_layout_err(err, span, ty) + } +} + +impl FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { + type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>; + + #[inline] + fn handle_fn_abi_err( + &self, + err: FnAbiError<'tcx>, + span: Span, + fn_abi_request: FnAbiRequest<'tcx>, + ) -> ! 
{ + self.cx.handle_fn_abi_err(err, span, fn_abi_request) } } @@ -200,6 +216,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn invoke( &mut self, + llty: &'ll Type, llfn: &'ll Value, args: &[&'ll Value], then: &'ll BasicBlock, @@ -208,13 +225,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ) -> &'ll Value { debug!("invoke {:?} with args ({:?})", llfn, args); - let args = self.check_call("invoke", llfn, args); + let args = self.check_call("invoke", llty, llfn, args); let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = bundle.as_ref().map(|b| &*b.raw); unsafe { llvm::LLVMRustBuildInvoke( self.llbuilder, + llty, llfn, args.as_ptr(), args.len() as c_uint, @@ -369,8 +387,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }, }; - let intrinsic = self.get_intrinsic(&name); - let res = self.call(intrinsic, &[lhs, rhs], None); + let res = self.call_intrinsic(name, &[lhs, rhs]); (self.extract_value(res, 0), self.extract_value(res, 1)) } @@ -381,7 +398,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { val } } - fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value { + fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value { if scalar.is_bool() { return self.trunc(val, self.cx().type_i1()); } @@ -459,17 +476,15 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn scalar_load_metadata<'a, 'll, 'tcx>( bx: &mut Builder<'a, 'll, 'tcx>, load: &'ll Value, - scalar: &abi::Scalar, + scalar: abi::Scalar, ) { - let vr = scalar.valid_range.clone(); match scalar.value { abi::Int(..) => { - let range = scalar.valid_range_exclusive(bx); - if range.start != range.end { - bx.range_metadata(load, range); + if !scalar.is_always_valid(bx) { + bx.range_metadata(load, scalar.valid_range); } } - abi::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + abi::Pointer if !scalar.valid_range.contains(0) => { bx.nonnull_metadata(load); } _ => {} @@ -489,17 +504,18 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } let llval = const_llval.unwrap_or_else(|| { let load = self.load(place.layout.llvm_type(self), place.llval, place.align); - if let abi::Abi::Scalar(ref scalar) = place.layout.abi { + if let abi::Abi::Scalar(scalar) = place.layout.abi { scalar_load_metadata(self, load, scalar); } load }); OperandValue::Immediate(self.to_immediate(llval, place.layout)) - } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi { + } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi { let b_offset = a.value.size(self).align_to(b.value.align(self).abi); + let pair_ty = place.layout.llvm_type(self); - let mut load = |i, scalar: &abi::Scalar, align| { - let llptr = self.struct_gep(place.llval, i as u64); + let mut load = |i, scalar: abi::Scalar, align| { + let llptr = self.struct_gep(pair_ty, place.llval, i as u64); let llty = place.layout.scalar_pair_element_llvm_type(self, i, false); let load = self.load(llty, llptr, align); scalar_load_metadata(self, load, scalar); @@ -543,16 +559,20 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { .val .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align)); - let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]); + let next = body_bx.inbounds_gep( + self.backend_type(cg_elem.layout), + current, + &[self.const_usize(1)], + ); body_bx.br(header_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); next_bx } - fn range_metadata(&mut self, load: 
&'ll Value, range: Range<u128>) { + fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) { if self.sess().target.arch == "amdgpu" { - // amdgpu/LLVM does something weird and thinks a i64 value is + // amdgpu/LLVM does something weird and thinks an i64 value is // split into a v2i32, halving the bitwidth LLVM expects, // tripping an assertion. So, for now, just disable this // optimization. @@ -563,7 +583,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let llty = self.cx.val_ty(load); let v = [ self.cx.const_uint_big(llty, range.start), - self.cx.const_uint_big(llty, range.end), + self.cx.const_uint_big(llty, range.end.wrapping_add(1)), ]; llvm::LLVMSetMetadata( @@ -639,10 +659,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { unsafe { - llvm::LLVMBuildGEP( + llvm::LLVMBuildGEP2( self.llbuilder, + ty, ptr, indices.as_ptr(), indices.len() as c_uint, @@ -651,10 +672,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn inbounds_gep( + &mut self, + ty: &'ll Type, + ptr: &'ll Value, + indices: &[&'ll Value], + ) -> &'ll Value { unsafe { - llvm::LLVMBuildInBoundsGEP( + llvm::LLVMBuildInBoundsGEP2( self.llbuilder, + ty, ptr, indices.as_ptr(), indices.len() as c_uint, @@ -663,9 +690,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { + fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value { assert_eq!(idx as c_uint as u64, idx); - unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) } + unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) } } /* Casts */ @@ -683,8 +710,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let float_width = self.cx.float_width(src_ty); let int_width = self.cx.int_width(dest_ty); let name = format!("llvm.fptoui.sat.i{}.f{}", int_width, float_width); - let intrinsic = self.get_intrinsic(&name); - return Some(self.call(intrinsic, &[val], None)); + return Some(self.call_intrinsic(&name, &[val])); } None @@ -696,8 +722,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let float_width = self.cx.float_width(src_ty); let int_width = self.cx.int_width(dest_ty); let name = format!("llvm.fptosi.sat.i{}.f{}", int_width, float_width); - let intrinsic = self.get_intrinsic(&name); - return Some(self.call(intrinsic, &[val], None)); + return Some(self.call_intrinsic(&name, &[val])); } None @@ -731,8 +756,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { _ => None, }; if let Some(name) = name { - let intrinsic = self.get_intrinsic(name); - return self.call(intrinsic, &[val], None); + return self.call_intrinsic(name, &[val]); } } } @@ -754,8 +778,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { _ => None, }; if let Some(name) = name { - let intrinsic = self.get_intrinsic(name); - return self.call(intrinsic, &[val], None); + return self.call_intrinsic(name, &[val]); } } } @@ -1103,12 +1126,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ); let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) }; + let llty = self.cx.type_func( + &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), 
self.cx.type_i32()], + self.cx.type_void(), + ); let args = &[fn_name, hash, num_counters, index]; - let args = self.check_call("call", llfn, args); + let args = self.check_call("call", llty, llfn, args); unsafe { let _ = llvm::LLVMRustBuildCall( self.llbuilder, + llty, llfn, args.as_ptr() as *const &llvm::Value, args.len() as c_uint, @@ -1119,19 +1147,21 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn call( &mut self, + llty: &'ll Type, llfn: &'ll Value, args: &[&'ll Value], funclet: Option<&Funclet<'ll>>, ) -> &'ll Value { debug!("call {:?} with args ({:?})", llfn, args); - let args = self.check_call("call", llfn, args); + let args = self.check_call("call", llty, llfn, args); let bundle = funclet.map(|funclet| funclet.bundle()); let bundle = bundle.as_ref().map(|b| &*b.raw); unsafe { llvm::LLVMRustBuildCall( self.llbuilder, + llty, llfn, args.as_ptr() as *const &llvm::Value, args.len() as c_uint, @@ -1301,15 +1331,10 @@ impl Builder<'a, 'll, 'tcx> { fn check_call<'b>( &mut self, typ: &str, + fn_ty: &'ll Type, llfn: &'ll Value, args: &'b [&'ll Value], ) -> Cow<'b, [&'ll Value]> { - let mut fn_ty = self.cx.val_ty(llfn); - // Strip off pointers - while self.cx.type_kind(fn_ty) == TypeKind::Pointer { - fn_ty = self.cx.element_type(fn_ty); - } - assert!( self.cx.type_kind(fn_ty) == TypeKind::Function, "builder::{} not passed a function, but {:?}", @@ -1350,6 +1375,11 @@ impl Builder<'a, 'll, 'tcx> { unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) } } + crate fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value { + let (ty, f) = self.cx.get_intrinsic(intrinsic); + self.call(ty, f, args, None) + } + fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { let size = size.bytes(); if size == 0 { @@ -1360,10 +1390,8 @@ impl Builder<'a, 'll, 'tcx> { return; } - let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); - let ptr = self.pointercast(ptr, self.cx.type_i8p()); - self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); + self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]); } pub(crate) fn phi( diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index bb16c90cd12..5d68d2b77d4 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -4,7 +4,7 @@ //! and methods are represented as just a fn ptr and not a full //! closure. -use crate::abi::{FnAbi, FnAbiLlvmExt}; +use crate::abi::FnAbiLlvmExt; use crate::attributes; use crate::context::CodegenCx; use crate::llvm; @@ -12,7 +12,7 @@ use crate::value::Value; use rustc_codegen_ssa::traits::*; use tracing::debug; -use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt}; +use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_middle::ty::{self, Instance, TypeFoldable}; /// Codegens a reference to a fn/method item, monomorphizing and @@ -42,9 +42,9 @@ pub fn get_fn(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value sym ); - let fn_abi = FnAbi::of_instance(cx, instance, &[]); + let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); - let llfn = if let Some(llfn) = cx.get_declared_value(&sym) { + let llfn = if let Some(llfn) = cx.get_declared_value(sym) { // Create a fn pointer with the new signature. 
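// Stepping back from the builder hunks above: `call`, `invoke`, `gep`,
// `inbounds_gep` and `struct_gep` now all take the LLVM type explicitly
// (LLVMBuildGEP2, LLVMBuildStructGEP2, LLVMRustBuildCall with a type
// argument), and `check_call` loses its pointer-peeling loop. This is
// groundwork for opaque pointers, where a function or pointee type can no
// longer be recovered from the pointer value itself. An illustrative
// fragment, assuming a builder `bx` plus `llfn`/`arg` values from elsewhere:
//
//     let fn_ty = bx.type_func(&[bx.type_i64()], bx.type_void());
//     let call = bx.call(fn_ty, llfn, &[arg], None); // type supplied by the caller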
let llptrty = fn_abi.ptr_to_llvm_type(cx); @@ -79,7 +79,7 @@ pub fn get_fn(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value llfn } } else { - let llfn = cx.declare_fn(&sym, &fn_abi); + let llfn = cx.declare_fn(sym, fn_abi); debug!("get_fn: not casting pointer!"); attributes::from_fn_attrs(cx, llfn, instance); diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 35e72621c56..73a8d464431 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -12,9 +12,10 @@ use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_middle::bug; use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar}; -use rustc_middle::ty::{layout::TyAndLayout, ScalarInt}; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_middle::ty::ScalarInt; use rustc_span::symbol::Symbol; -use rustc_target::abi::{self, AddressSpace, HasDataLayout, LayoutOf, Pointer, Size}; +use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer, Size}; use libc::{c_char, c_uint}; use tracing::debug; @@ -227,7 +228,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { }) } - fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value { + fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value { let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; match cv { Scalar::Int(ScalarInt::ZST) => { @@ -268,7 +269,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } }; let llval = unsafe { - llvm::LLVMConstInBoundsGEP( + llvm::LLVMRustConstInBoundsGEP2( + self.type_i8(), self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)), &self.const_usize(offset.bytes()), 1, @@ -303,7 +305,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let base_addr = self.static_addr_of(init, alloc.align, None); let llval = unsafe { - llvm::LLVMConstInBoundsGEP( + llvm::LLVMRustConstInBoundsGEP2( + self.type_i8(), self.const_bitcast(base_addr, self.type_i8p()), &self.const_usize(offset.bytes()), 1, diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 3ca295f4a7e..1afa6f02836 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -11,12 +11,17 @@ use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::interpret::{ - read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer, Scalar as InterpScalar, + read_target_uint, Allocation, ErrorHandled, GlobalAlloc, InitChunk, Pointer, + Scalar as InterpScalar, }; use rustc_middle::mir::mono::MonoItem; +use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::{bug, span_bug}; -use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size}; +use rustc_target::abi::{ + AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange, +}; +use std::ops::Range; use tracing::debug; pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { @@ -24,6 +29,57 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll let dl = cx.data_layout(); let pointer_size = dl.pointer_size.bytes() as usize; + // Note: this function may call `inspect_with_uninit_and_ptr_outside_interpreter`, + // so `range` 
must be within the bounds of `alloc` and not contain or overlap a relocation. + fn append_chunks_of_init_and_uninit_bytes<'ll, 'a, 'b>( + llvals: &mut Vec<&'ll Value>, + cx: &'a CodegenCx<'ll, 'b>, + alloc: &'a Allocation, + range: Range<usize>, + ) { + let mut chunks = alloc + .init_mask() + .range_as_init_chunks(Size::from_bytes(range.start), Size::from_bytes(range.end)); + + let chunk_to_llval = move |chunk| match chunk { + InitChunk::Init(range) => { + let range = (range.start.bytes() as usize)..(range.end.bytes() as usize); + let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range); + cx.const_bytes(bytes) + } + InitChunk::Uninit(range) => { + let len = range.end.bytes() - range.start.bytes(); + cx.const_undef(cx.type_array(cx.type_i8(), len)) + } + }; + + // Generating partially-uninit consts inhibits optimizations, so it is disabled by default. + // See https://github.com/rust-lang/rust/issues/84565. + let allow_partially_uninit = + match cx.sess().opts.debugging_opts.partially_uninit_const_threshold { + Some(max) => range.len() <= max, + None => false, + }; + + if allow_partially_uninit { + llvals.extend(chunks.map(chunk_to_llval)); + } else { + let llval = match (chunks.next(), chunks.next()) { + (Some(chunk), None) => { + // exactly one chunk, either fully init or fully uninit + chunk_to_llval(chunk) + } + _ => { + // partially uninit, codegen as if it was initialized + // (using some arbitrary value for uninit bytes) + let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range); + cx.const_bytes(bytes) + } + }; + llvals.push(llval); + } + } + let mut next_offset = 0; for &(offset, alloc_id) in alloc.relocations().iter() { let offset = offset.bytes(); @@ -32,12 +88,8 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll if offset > next_offset { // This `inspect` is okay since we have checked that it is not within a relocation, it // is within the bounds of the allocation, and it doesn't affect interpreter execution - // (we inspect the result after interpreter execution). Any undef byte is replaced with - // some arbitrary byte value. - // - // FIXME: relay undef bytes to codegen as undef const bytes - let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset); - llvals.push(cx.const_bytes(bytes)); + // (we inspect the result after interpreter execution). + append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, next_offset..offset); } let ptr_offset = read_target_uint( dl.endian, @@ -59,7 +111,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Pointer::new(alloc_id, Size::from_bytes(ptr_offset)), &cx.tcx, ), - &Scalar { value: Primitive::Pointer, valid_range: 0..=!0 }, + Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } }, cx.type_i8p_ext(address_space), )); next_offset = offset + pointer_size; @@ -68,12 +120,8 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll let range = next_offset..alloc.len(); // This `inspect` is okay since we have check that it is after all relocations, it is // within the bounds of the allocation, and it doesn't affect interpreter execution (we - // inspect the result after interpreter execution). Any undef byte is replaced with some - // arbitrary byte value. 
- // - // FIXME: relay undef bytes to codegen as undef const bytes - let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range); - llvals.push(cx.const_bytes(bytes)); + // inspect the result after interpreter execution). + append_chunks_of_init_and_uninit_bytes(&mut llvals, cx, alloc, range); } cx.const_struct(&llvals, true) @@ -130,7 +178,7 @@ fn check_and_apply_linkage( }; unsafe { // Declare a symbol `foo` with the desired linkage. - let g1 = cx.declare_global(&sym, llty2); + let g1 = cx.declare_global(sym, llty2); llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which @@ -140,7 +188,7 @@ fn check_and_apply_linkage( // `extern_with_linkage_foo` will instead be initialized to // zero. let mut real_name = "_rust_extern_with_linkage_".to_string(); - real_name.push_str(&sym); + real_name.push_str(sym); let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| { cx.sess().span_fatal( cx.tcx.def_span(span_def_id), @@ -154,7 +202,7 @@ fn check_and_apply_linkage( } else { // Generate an external declaration. // FIXME(nagisa): investigate whether it can be changed into define_global - cx.declare_global(&sym, llty) + cx.declare_global(sym, llty) } } @@ -186,7 +234,7 @@ impl CodegenCx<'ll, 'tcx> { _ => self.define_private_global(self.val_ty(cv)), }; llvm::LLVMSetInitializer(gv, cv); - set_global_alignment(&self, gv, align); + set_global_alignment(self, gv, align); llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global); gv } @@ -231,7 +279,7 @@ impl CodegenCx<'ll, 'tcx> { g } else { - check_and_apply_linkage(&self, &fn_attrs, ty, sym, def_id) + check_and_apply_linkage(self, fn_attrs, ty, sym, def_id) }; // Thread-local statics in some other crate need to *always* be linked @@ -321,7 +369,7 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { unsafe { let attrs = self.tcx.codegen_fn_attrs(def_id); - let (v, alloc) = match codegen_static_initializer(&self, def_id) { + let (v, alloc) = match codegen_static_initializer(self, def_id) { Ok(v) => v, // Error has already been reported Err(_) => return, @@ -369,7 +417,7 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { self.statics_to_rauw.borrow_mut().push((g, new_g)); new_g }; - set_global_alignment(&self, g, self.align_of(ty)); + set_global_alignment(self, g, self.align_of(ty)); llvm::LLVMSetInitializer(g, v); if self.should_assume_dso_local(g, true) { @@ -382,7 +430,7 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { llvm::LLVMSetGlobalConstant(g, llvm::True); } - debuginfo::create_global_var_metadata(&self, def_id, g); + debuginfo::create_global_var_metadata(self, def_id, g); if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { llvm::set_thread_local_mode(g, self.tls_model); @@ -470,11 +518,17 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { ); } } else { - base::set_link_section(g, &attrs); + base::set_link_section(g, attrs); } if attrs.flags.contains(CodegenFnAttrFlags::USED) { - self.add_used_global(g); + // The semantics of #[used] in Rust only require the symbol to make it into the + // object file. It is explicitly allowed for the linker to strip the symbol if it + // is dead. As such, use llvm.compiler.used instead of llvm.used. + // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique + // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs + // in some versions of the gold linker. 
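// At the language level the guarantee of #[used] is unchanged, "must reach
// the object file"; only the symbol's fate at link time is clarified. For
// example, under a garbage-collecting link (e.g. --gc-sections):
//
//     #[used]
//     static TAG: [u8; 4] = *b"tag!"; // always emitted into the object file,
//                                     // but the linker may still strip it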
+ self.add_compiler_used_global(g); } } } @@ -484,4 +538,11 @@ impl StaticMethods for CodegenCx<'ll, 'tcx> { let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; self.used_statics.borrow_mut().push(cast); } + + /// Add a global value to a list to be stored in the `llvm.compiler.used` variable, + /// an array of i8*. + fn add_compiler_used_global(&self, global: &'ll Value) { + let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; + self.compiler_used_statics.borrow_mut().push(cast); + } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 59259857b4b..257a0ac89d8 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -14,16 +14,22 @@ use rustc_codegen_ssa::traits::*; use rustc_data_structures::base_n; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_middle::bug; use rustc_middle::mir::mono::CodegenUnit; -use rustc_middle::ty::layout::{HasParamEnv, LayoutError, TyAndLayout}; +use rustc_middle::ty::layout::{ + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers, + TyAndLayout, +}; use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; +use rustc_middle::{bug, span_bug}; use rustc_session::config::{CFGuard, CrateType, DebugInfo}; use rustc_session::Session; -use rustc_span::source_map::{Span, DUMMY_SP}; +use rustc_span::source_map::Span; use rustc_span::symbol::Symbol; -use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx}; +use rustc_target::abi::{ + call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx, +}; use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel}; +use smallvec::SmallVec; use std::cell::{Cell, RefCell}; use std::ffi::CStr; @@ -74,8 +80,16 @@ pub struct CodegenCx<'ll, 'tcx> { /// See <https://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details pub used_statics: RefCell<Vec<&'ll Value>>, - pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>, + /// Statics that will be placed in the llvm.compiler.used variable + /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details + pub compiler_used_statics: RefCell<Vec<&'ll Value>>, + + /// Mapping of non-scalar types to llvm types and field remapping if needed. + pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>, + + /// Mapping of scalar types to llvm types. pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>, + pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>, pub isize_ty: &'ll Type, @@ -84,14 +98,23 @@ pub struct CodegenCx<'ll, 'tcx> { eh_personality: Cell<Option<&'ll Value>>, eh_catch_typeinfo: Cell<Option<&'ll Value>>, - pub rust_try_fn: Cell<Option<&'ll Value>>, + pub rust_try_fn: Cell<Option<(&'ll Type, &'ll Value)>>, - intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>, + intrinsics: RefCell<FxHashMap<&'static str, (&'ll Type, &'ll Value)>>, /// A counter that is used for generating local symbol names local_gen_sym_counter: Cell<usize>, } +pub struct TypeLowering<'ll> { + /// Associated LLVM type + pub lltype: &'ll Type, + + /// If padding is used the slice maps fields from source order + /// to llvm order. 
+ pub field_remapping: Option<SmallVec<[u32; 4]>>, +} + fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { match tls_model { TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic, @@ -101,10 +124,6 @@ fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { } } -fn strip_powerpc64_vectors(data_layout: String) -> String { - data_layout.replace("-v256:256:256-v512:512:512", "") -} - pub unsafe fn create_module( tcx: TyCtxt<'_>, llcx: &'ll llvm::Context, @@ -116,7 +135,18 @@ pub unsafe fn create_module( let mut target_data_layout = sess.target.data_layout.clone(); if llvm_util::get_version() < (12, 0, 0) && sess.target.arch == "powerpc64" { - target_data_layout = strip_powerpc64_vectors(target_data_layout); + target_data_layout = target_data_layout.replace("-v256:256:256-v512:512:512", ""); + } + if llvm_util::get_version() < (13, 0, 0) { + if sess.target.arch == "powerpc64" { + target_data_layout = target_data_layout.replace("-S128", ""); + } + if sess.target.arch == "wasm32" { + target_data_layout = "e-m:e-p:32:32-i64:64-n32:64-S128".to_string(); + } + if sess.target.arch == "wasm64" { + target_data_layout = "e-m:e-p:64:64-i64:64-n32:64-S128".to_string(); + } } // Ensure the data-layout values hardcoded remain the defaults. @@ -165,11 +195,14 @@ pub unsafe fn create_module( let llvm_target = SmallCStr::new(&sess.target.llvm_target); llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr()); - if sess.relocation_model() == RelocModel::Pic { + let reloc_model = sess.relocation_model(); + if matches!(reloc_model, RelocModel::Pic | RelocModel::Pie) { llvm::LLVMRustSetModulePICLevel(llmod); // PIE is potentially more effective than PIC, but can only be used in executables. // If all our outputs are executables, then we can relax PIC to PIE. 
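// A compilable model of the PIE-level decision above, for relocation models
// that already passed the `Pic | Pie` test (the enum stands in for the real
// rustc_target type):
//
//     enum RelocModel { Pic, Pie }
//     fn set_pie_level(reloc: RelocModel, all_executables: bool) -> bool {
//         matches!(reloc, RelocModel::Pie) || all_executables
//     }
//
// That is, `pie` opts in unconditionally, while plain `pic` is only relaxed
// to PIE when every requested crate type is an executable.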
- if sess.crate_types().iter().all(|ty| *ty == CrateType::Executable) { + if reloc_model == RelocModel::Pie + || sess.crate_types().iter().all(|ty| *ty == CrateType::Executable) + { llvm::LLVMRustSetModulePIELevel(llmod); } } @@ -304,7 +337,8 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { const_globals: Default::default(), statics_to_rauw: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()), - lltypes: Default::default(), + compiler_used_statics: RefCell::new(Vec::new()), + type_lowering: Default::default(), scalar_lltypes: Default::default(), pointee_infos: Default::default(), isize_ty, @@ -326,6 +360,18 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { pub fn coverage_context(&'a self) -> Option<&'a coverageinfo::CrateCoverageContext<'ll, 'tcx>> { self.coverage_cx.as_ref() } + + fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { + let section = cstr!("llvm.metadata"); + let array = self.const_array(self.type_ptr_to(self.type_i8()), values); + + unsafe { + let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr()); + llvm::LLVMSetInitializer(g, array); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); + llvm::LLVMSetSection(g, section.as_ptr()); + } + } } impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -401,7 +447,7 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn sess(&self) -> &Session { - &self.tcx.sess + self.tcx.sess } fn check_overflow(&self) -> bool { @@ -416,6 +462,10 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { &self.used_statics } + fn compiler_used_statics(&self) -> &RefCell<Vec<&'ll Value>> { + &self.compiler_used_statics + } + fn set_frame_pointer_type(&self, llfn: &'ll Value) { attributes::set_frame_pointer_type(self, llfn) } @@ -426,17 +476,14 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn create_used_variable(&self) { - let name = cstr!("llvm.used"); - let section = cstr!("llvm.metadata"); - let array = - self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow()); + self.create_used_variable_impl(cstr!("llvm.used"), &*self.used_statics.borrow()); + } - unsafe { - let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr()); - llvm::LLVMSetInitializer(g, array); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); - } + fn create_compiler_used_variable(&self) { + self.create_used_variable_impl( + cstr!("llvm.compiler.used"), + &*self.compiler_used_statics.borrow(), + ); } fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { @@ -452,7 +499,7 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { } impl CodegenCx<'b, 'tcx> { - crate fn get_intrinsic(&self, key: &str) -> &'b Value { + crate fn get_intrinsic(&self, key: &str) -> (&'b Type, &'b Value) { if let Some(v) = self.intrinsics.borrow().get(key).cloned() { return v; } @@ -465,18 +512,18 @@ impl CodegenCx<'b, 'tcx> { name: &'static str, args: Option<&[&'b llvm::Type]>, ret: &'b llvm::Type, - ) -> &'b llvm::Value { + ) -> (&'b llvm::Type, &'b llvm::Value) { let fn_ty = if let Some(args) = args { self.type_func(args, ret) } else { self.type_variadic_func(&[], ret) }; let f = self.declare_cfn(name, llvm::UnnamedAddr::No, fn_ty); - self.intrinsics.borrow_mut().insert(name, f); - f + self.intrinsics.borrow_mut().insert(name, (fn_ty, f)); + (fn_ty, f) } - fn declare_intrinsic(&self, key: &str) -> Option<&'b Value> { + fn declare_intrinsic(&self, key: &str) -> Option<(&'b Type, &'b Value)> { macro_rules! 
ifn { ($name:expr, fn() -> $ret:expr) => ( if key == $name { @@ -796,27 +843,58 @@ impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> { } } -impl LayoutOf for CodegenCx<'ll, 'tcx> { - type Ty = Ty<'tcx>; - type TyAndLayout = TyAndLayout<'tcx>; - - fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout { - self.spanned_layout_of(ty, DUMMY_SP) +impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> { + fn param_env(&self) -> ty::ParamEnv<'tcx> { + ty::ParamEnv::reveal_all() } +} - fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout { - self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| { - if let LayoutError::SizeOverflow(_) = e { - self.sess().span_fatal(span, &e.to_string()) - } else { - bug!("failed to get layout for `{}`: {}", ty, e) - } - }) +impl LayoutOfHelpers<'tcx> for CodegenCx<'ll, 'tcx> { + type LayoutOfResult = TyAndLayout<'tcx>; + + #[inline] + fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { + if let LayoutError::SizeOverflow(_) = err { + self.sess().span_fatal(span, &err.to_string()) + } else { + span_bug!(span, "failed to get layout for `{}`: {}", ty, err) + } } } -impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> { - fn param_env(&self) -> ty::ParamEnv<'tcx> { - ty::ParamEnv::reveal_all() +impl FnAbiOfHelpers<'tcx> for CodegenCx<'ll, 'tcx> { + type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>; + + #[inline] + fn handle_fn_abi_err( + &self, + err: FnAbiError<'tcx>, + span: Span, + fn_abi_request: FnAbiRequest<'tcx>, + ) -> ! { + if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err { + self.sess().span_fatal(span, &err.to_string()) + } else { + match fn_abi_request { + FnAbiRequest::OfFnPtr { sig, extra_args } => { + span_bug!( + span, + "`fn_abi_of_fn_ptr({}, {:?})` failed: {}", + sig, + extra_args, + err + ); + } + FnAbiRequest::OfInstance { instance, extra_args } => { + span_bug!( + span, + "`fn_abi_of_instance({}, {:?})` failed: {}", + instance, + extra_args, + err + ); + } + } + } } } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index d2a2e739ff3..6830864ba04 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -73,7 +73,7 @@ pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) { mapgen.write_coverage_mapping(expressions, counter_regions, coverage_mapping_buffer); }); debug_assert!( - coverage_mapping_buffer.len() > 0, + !coverage_mapping_buffer.is_empty(), "Every `FunctionCoverage` should have at least one counter" ); @@ -311,8 +311,7 @@ fn add_unused_functions<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) { // for each region in it's MIR. // Convert the `HashSet` of `codegenned_def_ids` to a sortable vector, and sort them. 
- let mut sorted_codegenned_def_ids: Vec<DefId> = - codegenned_def_ids.iter().map(|def_id| *def_id).collect(); + let mut sorted_codegenned_def_ids: Vec<DefId> = codegenned_def_ids.iter().copied().collect(); sorted_codegenned_def_ids.sort_unstable(); let mut first_covered_def_id_by_file: FxHashMap<Symbol, DefId> = FxHashMap::default(); diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 019bf4a09a7..ef11e2972ea 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -1,6 +1,6 @@ use crate::llvm; -use crate::abi::{Abi, FnAbi}; +use crate::abi::Abi; use crate::builder::Builder; use crate::common::CodegenCx; @@ -20,7 +20,7 @@ use rustc_middle::mir::coverage::{ CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, Op, }; use rustc_middle::ty; -use rustc_middle::ty::layout::FnAbiExt; +use rustc_middle::ty::layout::FnAbiOf; use rustc_middle::ty::subst::InternalSubsts; use rustc_middle::ty::Instance; @@ -199,9 +199,8 @@ fn declare_unused_fn(cx: &CodegenCx<'ll, 'tcx>, def_id: &DefId) -> Instance<'tcx ); let llfn = cx.declare_fn( - &tcx.symbol_name(instance).name, - &FnAbi::of_fn_ptr( - cx, + tcx.symbol_name(instance).name, + cx.fn_abi_of_fn_ptr( ty::Binder::dummy(tcx.mk_fn_sig( iter::once(tcx.mk_unit()), tcx.mk_unit(), @@ -209,7 +208,7 @@ fn declare_unused_fn(cx: &CodegenCx<'ll, 'tcx>, def_id: &DefId) -> Instance<'tcx hir::Unsafety::Unsafe, Abi::Rust, )), - &[], + ty::List::empty(), ), ); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs index c2725b83f50..58f8573a2ac 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs @@ -3,12 +3,11 @@ use super::utils::DIB; use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext}; use rustc_codegen_ssa::traits::*; -use crate::abi::FnAbi; use crate::common::CodegenCx; use crate::llvm; use crate::llvm::debuginfo::{DILocation, DIScope}; use rustc_middle::mir::{Body, SourceScope}; -use rustc_middle::ty::layout::FnAbiExt; +use rustc_middle::ty::layout::FnAbiOf; use rustc_middle::ty::{self, Instance}; use rustc_session::config::DebugInfo; @@ -42,7 +41,7 @@ pub fn compute_mir_scopes( // Instantiate all scopes. for idx in 0..mir.source_scopes.len() { let scope = SourceScope::new(idx); - make_mir_scope(cx, instance, &mir, fn_dbg_scope, &has_variables, debug_context, scope); + make_mir_scope(cx, instance, mir, fn_dbg_scope, &has_variables, debug_context, scope); } } @@ -94,8 +93,8 @@ fn make_mir_scope( ty::ParamEnv::reveal_all(), callee, ); - let callee_fn_abi = FnAbi::of_instance(cx, callee, &[]); - cx.dbg_scope_fn(callee, &callee_fn_abi, None) + let callee_fn_abi = cx.fn_abi_of_instance(callee, ty::List::empty()); + cx.dbg_scope_fn(callee, callee_fn_abi, None) } None => unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlock( diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index de3f719b816..ae1f83d944f 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -15,12 +15,11 @@ use rustc_span::symbol::sym; /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. 
pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { if needs_gdb_debug_scripts_section(bx) { - let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); + let gdb_debug_scripts_section = + bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p()); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. - let indices = [bx.const_i32(0), bx.const_i32(0)]; - let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); - let volative_load_instruction = bx.volatile_load(bx.type_i8(), element); + let volative_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section); unsafe { llvm::LLVMSetAlignment(volative_load_instruction, 1); } @@ -60,10 +59,8 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) - } pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { - let omit_gdb_pretty_printer_section = cx - .tcx - .sess - .contains_name(&cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section); + let omit_gdb_pretty_printer_section = + cx.tcx.sess.contains_name(cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section); !omit_gdb_pretty_printer_section && cx.sess().opts.debuginfo != DebugInfo::None diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 7e136c1b24c..f6ec5e6395f 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -28,15 +28,16 @@ use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_index::vec::{Idx, IndexVec}; use rustc_middle::ich::NodeIdHashingMode; use rustc_middle::mir::{self, GeneratorLayout}; -use rustc_middle::ty::layout::{self, IntegerExt, PrimitiveExt, TyAndLayout}; +use rustc_middle::ty::layout::{self, IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout}; use rustc_middle::ty::subst::GenericArgKind; use rustc_middle::ty::Instance; use rustc_middle::ty::{self, AdtKind, GeneratorSubsts, ParamEnv, Ty, TyCtxt}; use rustc_middle::{bug, span_bug}; use rustc_session::config::{self, DebugInfo}; -use rustc_span::symbol::{Interner, Symbol}; +use rustc_span::symbol::Symbol; +use rustc_span::FileNameDisplayPreference; use rustc_span::{self, SourceFile, SourceFileHash, Span}; -use rustc_target::abi::{Abi, Align, HasDataLayout, Integer, LayoutOf, TagEncoding}; +use rustc_target::abi::{Abi, Align, HasDataLayout, Integer, TagEncoding}; use rustc_target::abi::{Int, Pointer, F32, F64}; use rustc_target::abi::{Primitive, Size, VariantIdx, Variants}; use tracing::debug; @@ -88,8 +89,54 @@ pub const UNKNOWN_COLUMN_NUMBER: c_uint = 0; pub const NO_SCOPE_METADATA: Option<&DIScope> = None; -#[derive(Copy, Debug, Hash, Eq, PartialEq, Clone)] -pub struct UniqueTypeId(Symbol); +mod unique_type_id { + use super::*; + use rustc_arena::DroplessArena; + + #[derive(Copy, Hash, Eq, PartialEq, Clone)] + pub(super) struct UniqueTypeId(u32); + + // The `&'static str`s in this type actually point into the arena. + // + // The `FxHashMap`+`Vec` pair could be replaced by `FxIndexSet`, but #75278 + // found that to regress performance up to 2% in some cases. This might be + // revisited after further improvements to `indexmap`. 
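+    //
+    // A minimal usage sketch (hypothetical, for illustration only):
+    //
+    //     let mut interner = TypeIdInterner::default();
+    //     let id = interner.intern("u32");
+    //     assert!(id == interner.intern("u32")); // same string, same id
+    //     assert!(interner.get(id) == "u32");    // `get` recovers the string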
+    #[derive(Default)]
+    pub(super) struct TypeIdInterner {
+        arena: DroplessArena,
+        names: FxHashMap<&'static str, UniqueTypeId>,
+        strings: Vec<&'static str>,
+    }
+
+    impl TypeIdInterner {
+        #[inline]
+        pub(super) fn intern(&mut self, string: &str) -> UniqueTypeId {
+            if let Some(&name) = self.names.get(string) {
+                return name;
+            }
+
+            let name = UniqueTypeId(self.strings.len() as u32);
+
+            // `from_utf8_unchecked` is safe since we just allocated a `&str` which is known to be
+            // UTF-8.
+            let string: &str =
+                unsafe { std::str::from_utf8_unchecked(self.arena.alloc_slice(string.as_bytes())) };
+            // It is safe to extend the arena allocation to `'static` because we only access
+            // these while the arena is still alive.
+            let string: &'static str = unsafe { &*(string as *const str) };
+            self.strings.push(string);
+            self.names.insert(string, name);
+            name
+        }
+
+        // Gets the string for a `UniqueTypeId`; callers normally go through
+        // `TypeMap::get_unique_type_id_as_string` rather than using this directly.
+        pub(super) fn get(&self, symbol: UniqueTypeId) -> &str {
+            self.strings[symbol.0 as usize]
+        }
+    }
+}
+use unique_type_id::*;
 
 /// The `TypeMap` is where the `CrateDebugContext` holds the type metadata nodes
 /// created so far. The metadata nodes are indexed by `UniqueTypeId`, and, for
@@ -98,7 +145,7 @@ pub struct UniqueTypeId(Symbol);
 #[derive(Default)]
 pub struct TypeMap<'ll, 'tcx> {
     /// The `UniqueTypeId`s created so far.
-    unique_id_interner: Interner,
+    unique_id_interner: TypeIdInterner,
     /// A map from `UniqueTypeId` to debuginfo metadata for that type. This is a 1:1 mapping.
     unique_id_to_metadata: FxHashMap<UniqueTypeId, &'ll DIType>,
     /// A map from types to debuginfo metadata. This is an N:1 mapping.
@@ -165,8 +212,7 @@ impl TypeMap<'ll, 'tcx> {
     /// Gets the string representation of a `UniqueTypeId`. This method will fail if
     /// the ID is unknown.
     fn get_unique_type_id_as_string(&self, unique_type_id: UniqueTypeId) -> &str {
-        let UniqueTypeId(interner_key) = unique_type_id;
-        self.unique_id_interner.get(interner_key)
+        self.unique_id_interner.get(unique_type_id)
     }
 
     /// Gets the `UniqueTypeId` for the given type. If the `UniqueTypeId` for the given
@@ -196,9 +242,9 @@ impl TypeMap<'ll, 'tcx> {
         let unique_type_id = hasher.finish::<Fingerprint>().to_hex();
         let key = self.unique_id_interner.intern(&unique_type_id);
-        self.type_to_unique_id.insert(type_, UniqueTypeId(key));
+        self.type_to_unique_id.insert(type_, key);
 
-        UniqueTypeId(key)
+        key
     }
 
     /// Gets the `UniqueTypeId` for an enum variant. Enum variants are not really
@@ -214,7 +260,7 @@ impl TypeMap<'ll, 'tcx> {
         let enum_variant_type_id =
             format!("{}::{}", self.get_unique_type_id_as_string(enum_type_id), variant_name);
         let interner_key = self.unique_id_interner.intern(&enum_variant_type_id);
-        UniqueTypeId(interner_key)
+        interner_key
     }
 
     /// Gets the unique type ID string for an enum variant part.
@@ -431,7 +477,7 @@ fn subroutine_type_metadata( let signature_metadata: Vec<_> = iter::once( // return type match signature.output().kind() { - ty::Tuple(ref tys) if tys.is_empty() => None, + ty::Tuple(tys) if tys.is_empty() => None, _ => Some(type_metadata(cx, signature.output(), span)), }, ) @@ -601,7 +647,7 @@ pub fn type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>, usage_site_span: Sp ty::Never | ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) } - ty::Tuple(ref elements) if elements.is_empty() => { + ty::Tuple(elements) if elements.is_empty() => { MetadataCreationResult::new(basic_type_metadata(cx, t), false) } ty::Array(typ, _) | ty::Slice(typ) => { @@ -700,7 +746,7 @@ pub fn type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>, usage_site_span: Sp .finalize(cx) } }, - ty::Tuple(ref elements) => { + ty::Tuple(elements) => { let tys: Vec<_> = elements.iter().map(|k| k.expect_ty()).collect(); prepare_tuple_metadata(cx, t, &tys, unique_type_id, usage_site_span, NO_SCOPE_METADATA) .finalize(cx) @@ -771,7 +817,13 @@ pub fn file_metadata(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll let hash = Some(&source_file.src_hash); let file_name = Some(source_file.name.prefer_remapped().to_string()); let directory = if source_file.is_real_file() && !source_file.is_imported() { - Some(cx.sess().working_dir.to_string_lossy(false).to_string()) + Some( + cx.sess() + .opts + .working_dir + .to_string_lossy(FileNameDisplayPreference::Remapped) + .to_string(), + ) } else { // If the path comes from an upstream crate we assume it has been made // independent of the compiler's working directory one way or another. @@ -880,7 +932,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { let (name, encoding) = match t.kind() { ty::Never => ("!", DW_ATE_unsigned), - ty::Tuple(ref elements) if elements.is_empty() => ("()", DW_ATE_unsigned), + ty::Tuple(elements) if elements.is_empty() => ("()", DW_ATE_unsigned), ty::Bool => ("bool", DW_ATE_boolean), ty::Char => ("char", DW_ATE_unsigned_char), ty::Int(int_ty) if msvc_like_names => (int_ty.msvc_basic_name(), DW_ATE_signed), @@ -999,7 +1051,7 @@ pub fn compile_unit_metadata( let producer = format!("clang LLVM ({})", rustc_producer); let name_in_debuginfo = name_in_debuginfo.to_string_lossy(); - let work_dir = tcx.sess.working_dir.to_string_lossy(false); + let work_dir = tcx.sess.opts.working_dir.to_string_lossy(FileNameDisplayPreference::Remapped); let flags = "\0"; let output_filenames = tcx.output_filenames(()); let out_dir = &output_filenames.out_directory; @@ -1071,7 +1123,7 @@ pub fn compile_unit_metadata( let gcov_cu_info = [ path_to_mdstring(debug_context.llcontext, &output_filenames.with_extension("gcno")), - path_to_mdstring(debug_context.llcontext, &gcda_path), + path_to_mdstring(debug_context.llcontext, gcda_path), cu_desc_metadata, ]; let gcov_metadata = llvm::LLVMMDNodeInContext( @@ -1280,6 +1332,31 @@ fn prepare_struct_metadata( // Tuples //=----------------------------------------------------------------------------- +/// Returns names of captured upvars for closures and generators. +/// +/// Here are some examples: +/// - `name__field1__field2` when the upvar is captured by value. +/// - `_ref__name__field` when the upvar is captured by reference. 
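+///
+/// So, illustratively (the names here are hypothetical): a field `f` of an
+/// upvar `x` captured by value would be saved as `x__f`, and as `_ref__x__f`
+/// when captured by reference.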
+fn closure_saved_names_of_captured_variables(tcx: TyCtxt<'tcx>, def_id: DefId) -> Vec<String> { + let body = tcx.optimized_mir(def_id); + + body.var_debug_info + .iter() + .filter_map(|var| { + let is_ref = match var.value { + mir::VarDebugInfoContents::Place(place) if place.local == mir::Local::new(1) => { + // The projection is either `[.., Field, Deref]` or `[.., Field]`. It + // implies whether the variable is captured by value or by reference. + matches!(place.projection.last().unwrap(), mir::ProjectionElem::Deref) + } + _ => return None, + }; + let prefix = if is_ref { "_ref__" } else { "" }; + Some(prefix.to_owned() + &var.name.as_str()) + }) + .collect::<Vec<_>>() +} + /// Creates `MemberDescription`s for the fields of a tuple. struct TupleMemberDescriptionFactory<'tcx> { ty: Ty<'tcx>, @@ -1289,14 +1366,25 @@ struct TupleMemberDescriptionFactory<'tcx> { impl<'tcx> TupleMemberDescriptionFactory<'tcx> { fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) -> Vec<MemberDescription<'ll>> { + let mut capture_names = match *self.ty.kind() { + ty::Generator(def_id, ..) | ty::Closure(def_id, ..) => { + Some(closure_saved_names_of_captured_variables(cx.tcx, def_id).into_iter()) + } + _ => None, + }; let layout = cx.layout_of(self.ty); self.component_types .iter() .enumerate() .map(|(i, &component_type)| { let (size, align) = cx.size_and_align_of(component_type); + let name = if let Some(names) = capture_names.as_mut() { + names.next().unwrap() + } else { + format!("__{}", i) + }; MemberDescription { - name: format!("__{}", i), + name, type_metadata: type_metadata(cx, component_type, self.span), offset: layout.fields.offset(i), size, @@ -1613,7 +1701,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { Variants::Multiple { tag_encoding: TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant }, - ref tag, + tag, ref variants, tag_field, } => { @@ -1875,17 +1963,13 @@ impl<'tcx> VariantInfo<'_, 'tcx> { } fn source_info(&self, cx: &CodegenCx<'ll, 'tcx>) -> Option<SourceInfo<'ll>> { - match self { - VariantInfo::Generator { def_id, variant_index, .. } => { - let span = cx.tcx.generator_layout(*def_id).unwrap().variant_source_info - [*variant_index] - .span; - if !span.is_dummy() { - let loc = cx.lookup_debug_loc(span.lo()); - return Some(SourceInfo { file: file_metadata(cx, &loc.file), line: loc.line }); - } + if let VariantInfo::Generator { def_id, variant_index, .. } = self { + let span = + cx.tcx.generator_layout(*def_id).unwrap().variant_source_info[*variant_index].span; + if !span.is_dummy() { + let loc = cx.lookup_debug_loc(span.lo()); + return Some(SourceInfo { file: file_metadata(cx, &loc.file), line: loc.line }); } - _ => {} } None } @@ -1906,11 +1990,11 @@ fn describe_enum_variant( let unique_type_id = debug_context(cx) .type_map .borrow_mut() - .get_unique_type_id_of_enum_variant(cx, layout.ty, &variant_name); + .get_unique_type_id_of_enum_variant(cx, layout.ty, variant_name); create_struct_stub( cx, layout.ty, - &variant_name, + variant_name, unique_type_id, Some(containing_scope), DIFlags::FlagZero, @@ -2039,10 +2123,8 @@ fn prepare_enum_metadata( let layout = cx.layout_of(enum_type); - if let ( - &Abi::Scalar(_), - &Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. }, - ) = (&layout.abi, &layout.variants) + if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. 
}) = + (layout.abi, &layout.variants) { return FinalMetadata(discriminant_type_metadata(tag.value)); } @@ -2050,8 +2132,8 @@ fn prepare_enum_metadata( if use_enum_fallback(cx) { let discriminant_type_metadata = match layout.variants { Variants::Single { .. } => None, - Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, ref tag, .. } - | Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => { + Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } + | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => { Some(discriminant_type_metadata(tag.value)) } }; @@ -2103,9 +2185,7 @@ fn prepare_enum_metadata( // A single-variant enum has no discriminant. Variants::Single { .. } => None, - Variants::Multiple { - tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, .. - } => { + Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => { // Find the integer type of the correct size. let size = tag.value.size(cx); let align = tag.value.align(cx); @@ -2136,7 +2216,7 @@ fn prepare_enum_metadata( } } - Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => { + Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => { let discr_type = tag.value.to_ty(cx.tcx); let (size, align) = cx.size_and_align_of(discr_type); @@ -2301,7 +2381,7 @@ fn set_members_of_composite_type( { let mut composite_types_completed = debug_context(cx).composite_types_completed.borrow_mut(); - if !composite_types_completed.insert(&composite_type_metadata) { + if !composite_types_completed.insert(composite_type_metadata) { bug!( "debuginfo::set_members_of_composite_type() - \ Already completed forward declaration re-encountered." diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 8375d4c7ca5..894320a7982 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -25,13 +25,14 @@ use rustc_data_structures::sync::Lrc; use rustc_hir::def_id::{DefId, DefIdMap}; use rustc_index::vec::IndexVec; use rustc_middle::mir; -use rustc_middle::ty::layout::HasTyCtxt; +use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; use rustc_middle::ty::subst::{GenericArgKind, SubstsRef}; use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeFoldable}; use rustc_session::config::{self, DebugInfo}; +use rustc_session::Session; use rustc_span::symbol::Symbol; use rustc_span::{self, BytePos, Pos, SourceFile, SourceFileAndLine, Span}; -use rustc_target::abi::{LayoutOf, Primitive, Size}; +use rustc_target::abi::{Primitive, Size}; use libc::c_uint; use smallvec::SmallVec; @@ -95,45 +96,52 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> { composite_types_completed: Default::default(), } } -} - -/// Creates any deferred debug metadata nodes -pub fn finalize(cx: &CodegenCx<'_, '_>) { - if cx.dbg_cx.is_none() { - return; - } - debug!("finalize"); + pub fn finalize(&self, sess: &Session) { + unsafe { + llvm::LLVMRustDIBuilderFinalize(self.builder); + + // Debuginfo generation in LLVM by default uses a higher + // version of dwarf than macOS currently understands. We can + // instruct LLVM to emit an older version of dwarf, however, + // for macOS to understand. For more info see #11352 + // This can be overridden using --llvm-opts -dwarf-version,N. 
+ // Android has the same issue (#22398) + if let Some(version) = sess.target.dwarf_version { + llvm::LLVMRustAddModuleFlag(self.llmod, "Dwarf Version\0".as_ptr().cast(), version) + } - if gdb::needs_gdb_debug_scripts_section(cx) { - // Add a .debug_gdb_scripts section to this compile-unit. This will - // cause GDB to try and load the gdb_load_rust_pretty_printers.py file, - // which activates the Rust pretty printers for binary this section is - // contained in. - gdb::get_or_insert_gdb_debug_scripts_section_global(cx); - } + // Indicate that we want CodeView debug information on MSVC + if sess.target.is_like_msvc { + llvm::LLVMRustAddModuleFlag(self.llmod, "CodeView\0".as_ptr().cast(), 1) + } - unsafe { - llvm::LLVMRustDIBuilderFinalize(DIB(cx)); - // Debuginfo generation in LLVM by default uses a higher - // version of dwarf than macOS currently understands. We can - // instruct LLVM to emit an older version of dwarf, however, - // for macOS to understand. For more info see #11352 - // This can be overridden using --llvm-opts -dwarf-version,N. - // Android has the same issue (#22398) - if let Some(version) = cx.sess().target.dwarf_version { - llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), version) + // Prevent bitcode readers from deleting the debug info. + let ptr = "Debug Info Version\0".as_ptr(); + llvm::LLVMRustAddModuleFlag( + self.llmod, + ptr.cast(), + llvm::LLVMRustDebugMetadataVersion(), + ); } + } +} - // Indicate that we want CodeView debug information on MSVC - if cx.sess().target.is_like_msvc { - llvm::LLVMRustAddModuleFlag(cx.llmod, "CodeView\0".as_ptr().cast(), 1) +/// Creates any deferred debug metadata nodes +pub fn finalize(cx: &CodegenCx<'_, '_>) { + if let Some(dbg_cx) = &cx.dbg_cx { + debug!("finalize"); + + if gdb::needs_gdb_debug_scripts_section(cx) { + // Add a .debug_gdb_scripts section to this compile-unit. This will + // cause GDB to try and load the gdb_load_rust_pretty_printers.py file, + // which activates the Rust pretty printers for binary this section is + // contained in. + gdb::get_or_insert_gdb_debug_scripts_section_global(cx); } - // Prevent bitcode readers from deleting the debug info. - let ptr = "Debug Info Version\0".as_ptr(); - llvm::LLVMRustAddModuleFlag(cx.llmod, ptr.cast(), llvm::LLVMRustDebugMetadataVersion()); - }; + dbg_cx.finalize(cx.sess()); + } } impl DebugInfoBuilderMethods for Builder<'a, 'll, 'tcx> { @@ -320,7 +328,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { // name if necessary. let generics = self.tcx().generics_of(enclosing_fn_def_id); let substs = instance.substs.truncate_to(self.tcx(), generics); - let template_parameters = get_template_parameters(self, &generics, substs, &mut name); + let template_parameters = get_template_parameters(self, generics, substs, &mut name); let linkage_name = &mangled_name_of_instance(self, instance).name; // Omit the linkage_name if it is the same as subprogram name. @@ -499,7 +507,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { ty::Adt(def, ..) 
if !def.is_box() => { // Again, only create type information if full debuginfo is enabled if cx.sess().opts.debuginfo == DebugInfo::Full - && !impl_self_ty.needs_subst() + && !impl_self_ty.definitely_needs_subst(cx.tcx) { Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP)) } else { @@ -551,7 +559,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { scope_metadata: &'ll DIScope, file: &rustc_span::SourceFile, ) -> &'ll DILexicalBlock { - metadata::extend_scope_to_file(&self, scope_metadata, file) + metadata::extend_scope_to_file(self, scope_metadata, file) } fn debuginfo_finalize(&self) { diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index a48a694b630..22dc8d101c8 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1,4 +1,4 @@ -use crate::abi::{Abi, FnAbi, LlvmType, PassMode}; +use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode}; use crate::builder::Builder; use crate::context::CodegenCx; use crate::llvm; @@ -7,6 +7,7 @@ use crate::type_of::LayoutLlvmExt; use crate::va_arg::emit_va_arg; use crate::value::Value; +use rustc_ast as ast; use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh}; use rustc_codegen_ssa::common::span_invalid_monomorphization_error; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; @@ -14,17 +15,17 @@ use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_hir as hir; -use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt}; +use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf}; use rustc_middle::ty::{self, Ty}; use rustc_middle::{bug, span_bug}; use rustc_span::{sym, symbol::kw, Span, Symbol}; -use rustc_target::abi::{self, HasDataLayout, LayoutOf, Primitive}; +use rustc_target::abi::{self, HasDataLayout, Primitive}; use rustc_target::spec::PanicStrategy; use std::cmp::Ordering; use std::iter; -fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Value> { +fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<(&'ll Type, &'ll Value)> { let llvm_name = match name { sym::sqrtf32 => "llvm.sqrt.f32", sym::sqrtf64 => "llvm.sqrt.f64", @@ -70,7 +71,7 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: Symbol) -> Option<&'ll Va sym::roundf64 => "llvm.round.f64", _ => return None, }; - Some(cx.get_intrinsic(&llvm_name)) + Some(cx.get_intrinsic(llvm_name)) } impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { @@ -95,26 +96,26 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let arg_tys = sig.inputs(); let ret_ty = sig.output(); let name = tcx.item_name(def_id); - let name_str = &*name.as_str(); let llret_ty = self.layout_of(ret_ty).llvm_type(self); let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout); let simple = get_simple_intrinsic(self, name); let llval = match name { - _ if simple.is_some() => self.call( - simple.unwrap(), - &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), - None, - ), - sym::likely => { - let expect = self.get_intrinsic(&("llvm.expect.i1")); - self.call(expect, &[args[0].immediate(), self.const_bool(true)], None) + _ if simple.is_some() => { + let (simple_ty, simple_fn) = simple.unwrap(); + self.call( + simple_ty, + simple_fn, + &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), + None, + ) } - sym::unlikely => { - let expect = self.get_intrinsic(&("llvm.expect.i1")); - self.call(expect, 
&[args[0].immediate(), self.const_bool(false)], None) + sym::likely => { + self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)]) } + sym::unlikely => self + .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]), kw::Try => { try_intrinsic( self, @@ -125,23 +126,19 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { ); return; } - sym::breakpoint => { - let llfn = self.get_intrinsic(&("llvm.debugtrap")); - self.call(llfn, &[], None) - } + sym::breakpoint => self.call_intrinsic("llvm.debugtrap", &[]), sym::va_copy => { - let intrinsic = self.cx().get_intrinsic(&("llvm.va_copy")); - self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None) + self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()]) } sym::va_arg => { match fn_abi.ret.layout.abi { - abi::Abi::Scalar(ref scalar) => { + abi::Abi::Scalar(scalar) => { match scalar.value { Primitive::Int(..) => { if self.cx().size_of(ret_ty).bytes() < 4 { - // `va_arg` should not be called on a integer type + // `va_arg` should not be called on an integer type // less than 4 bytes in length. If it is, promote - // the integer to a `i32` and truncate the result + // the integer to an `i32` and truncate the result // back to the smaller type. let promoted_result = emit_va_arg(self, args[0], tcx.types.i32); self.trunc(promoted_result, llret_ty) @@ -194,7 +191,6 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { | sym::prefetch_write_data | sym::prefetch_read_instruction | sym::prefetch_write_instruction => { - let expect = self.get_intrinsic(&("llvm.prefetch")); let (rw, cache_type) = match name { sym::prefetch_read_data => (0, 1), sym::prefetch_write_data => (1, 1), @@ -202,15 +198,14 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { sym::prefetch_write_instruction => (1, 0), _ => bug!(), }; - self.call( - expect, + self.call_intrinsic( + "llvm.prefetch", &[ args[0].immediate(), self.const_i32(rw), args[1].immediate(), self.const_i32(cache_type), ], - None, ) } sym::ctlz @@ -229,35 +224,38 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { Some((width, signed)) => match name { sym::ctlz | sym::cttz => { let y = self.const_bool(false); - let llfn = self.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - self.call(llfn, &[args[0].immediate(), y], None) + self.call_intrinsic( + &format!("llvm.{}.i{}", name, width), + &[args[0].immediate(), y], + ) + } + sym::ctlz_nonzero => { + let y = self.const_bool(true); + let llvm_name = &format!("llvm.ctlz.i{}", width); + self.call_intrinsic(llvm_name, &[args[0].immediate(), y]) } - sym::ctlz_nonzero | sym::cttz_nonzero => { + sym::cttz_nonzero => { let y = self.const_bool(true); - let llvm_name = &format!("llvm.{}.i{}", &name_str[..4], width); - let llfn = self.get_intrinsic(llvm_name); - self.call(llfn, &[args[0].immediate(), y], None) + let llvm_name = &format!("llvm.cttz.i{}", width); + self.call_intrinsic(llvm_name, &[args[0].immediate(), y]) } - sym::ctpop => self.call( - self.get_intrinsic(&format!("llvm.ctpop.i{}", width)), + sym::ctpop => self.call_intrinsic( + &format!("llvm.ctpop.i{}", width), &[args[0].immediate()], - None, ), sym::bswap => { if width == 8 { args[0].immediate() // byte swap a u8/i8 is just a no-op } else { - self.call( - self.get_intrinsic(&format!("llvm.bswap.i{}", width)), + self.call_intrinsic( + &format!("llvm.bswap.i{}", width), &[args[0].immediate()], - None, ) } } - sym::bitreverse => self.call( - 
self.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), + sym::bitreverse => self.call_intrinsic( + &format!("llvm.bitreverse.i{}", width), &[args[0].immediate()], - None, ), sym::rotate_left | sym::rotate_right => { let is_left = name == sym::rotate_left; @@ -266,8 +264,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { // rotate = funnel shift with first two args the same let llvm_name = &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width); - let llfn = self.get_intrinsic(llvm_name); - self.call(llfn, &[val, val, raw_shift], None) + self.call_intrinsic(llvm_name, &[val, val, raw_shift]) } sym::saturating_add | sym::saturating_sub => { let is_add = name == sym::saturating_add; @@ -279,8 +276,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { if is_add { "add" } else { "sub" }, width ); - let llfn = self.get_intrinsic(llvm_name); - self.call(llfn, &[lhs, rhs], None) + self.call_intrinsic(llvm_name, &[lhs, rhs]) } _ => bug!(), }, @@ -331,13 +327,37 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { let a_ptr = self.bitcast(a, i8p_ty); let b_ptr = self.bitcast(b, i8p_ty); let n = self.const_usize(layout.size.bytes()); - let llfn = self.get_intrinsic("memcmp"); - let cmp = self.call(llfn, &[a_ptr, b_ptr, n], None); + let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]); self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)) } } - _ if name_str.starts_with("simd_") => { + sym::black_box => { + args[0].val.store(self, result); + + // We need to "use" the argument in some way LLVM can't introspect, and on + // targets that support it we can typically leverage inline assembly to do + // this. LLVM's interpretation of inline assembly is that it's, well, a black + // box. This isn't the greatest implementation since it probably deoptimizes + // more than we want, but it's so far good enough. + crate::asm::inline_asm_call( + self, + "", + "r,~{memory}", + &[result.llval], + self.type_void(), + true, + false, + ast::LlvmAsmDialect::Att, + &[span], + ) + .unwrap_or_else(|| bug!("failed to generate inline asm call for `black_box`")); + + // We have copied the value to `result` already. + return; + } + + _ if name.as_str().starts_with("simd_") => { match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { Ok(llval) => llval, Err(()) => return, @@ -361,18 +381,15 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { } fn abort(&mut self) { - let fnname = self.get_intrinsic(&("llvm.trap")); - self.call(fnname, &[], None); + self.call_intrinsic("llvm.trap", &[]); } fn assume(&mut self, val: Self::Value) { - let assume_intrinsic = self.get_intrinsic("llvm.assume"); - self.call(assume_intrinsic, &[val], None); + self.call_intrinsic("llvm.assume", &[val]); } fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value { - let expect = self.get_intrinsic(&"llvm.expect.i1"); - self.call(expect, &[cond, self.const_bool(expected)], None) + self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)]) } fn sideeffect(&mut self) { @@ -380,19 +397,16 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> { // caller of this function is in `rustc_codegen_ssa`, which is agnostic to whether LLVM // codegen backend being used, and so is unable to check the LLVM version. 
if unsafe { llvm::LLVMRustVersionMajor() } < 12 { - let fnname = self.get_intrinsic(&("llvm.sideeffect")); - self.call(fnname, &[], None); + self.call_intrinsic("llvm.sideeffect", &[]); } } fn va_start(&mut self, va_list: &'ll Value) -> &'ll Value { - let intrinsic = self.cx().get_intrinsic("llvm.va_start"); - self.call(intrinsic, &[va_list], None) + self.call_intrinsic("llvm.va_start", &[va_list]) } fn va_end(&mut self, va_list: &'ll Value) -> &'ll Value { - let intrinsic = self.cx().get_intrinsic("llvm.va_end"); - self.call(intrinsic, &[va_list], None) + self.call_intrinsic("llvm.va_end", &[va_list]) } } @@ -404,7 +418,8 @@ fn try_intrinsic( dest: &'ll Value, ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { - bx.call(try_func, &[data], None); + let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + bx.call(try_func_ty, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. let ret_align = bx.tcx().data_layout.i32_align.abi; @@ -432,7 +447,7 @@ fn codegen_msvc_try( catch_func: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(bx, &mut |mut bx| { + let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| { bx.set_personality_fn(bx.eh_personality()); let mut normal = bx.build_sibling_block("normal"); @@ -502,7 +517,8 @@ fn codegen_msvc_try( // More information can be found in libstd's seh.rs implementation. let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(bx.type_i8p(), ptr_align); - bx.invoke(try_func, &[data], normal.llbb(), catchswitch.llbb(), None); + let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + bx.invoke(try_func_ty, try_func, &[data], normal.llbb(), catchswitch.llbb(), None); normal.ret(bx.const_i32(0)); @@ -544,14 +560,15 @@ fn codegen_msvc_try( let flags = bx.const_i32(8); let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]); let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align); - catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet)); + let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + catchpad_rust.call(catch_ty, catch_func, &[data, ptr], Some(&funclet)); catchpad_rust.catch_ret(&funclet, caught.llbb()); // The flag value of 64 indicates a "catch-all". let flags = bx.const_i32(64); let null = bx.const_null(bx.type_i8p()); let funclet = catchpad_foreign.catch_pad(cs, &[null, flags, null]); - catchpad_foreign.call(catch_func, &[data, null], Some(&funclet)); + catchpad_foreign.call(catch_ty, catch_func, &[data, null], Some(&funclet)); catchpad_foreign.catch_ret(&funclet, caught.llbb()); caught.ret(bx.const_i32(1)); @@ -559,7 +576,7 @@ fn codegen_msvc_try( // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). 
- let ret = bx.call(llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -582,7 +599,7 @@ fn codegen_gnu_try( catch_func: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(bx, &mut |mut bx| { + let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| { // Codegens the shims described above: // // bx: @@ -601,7 +618,8 @@ fn codegen_gnu_try( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None); + let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None); then.ret(bx.const_i32(0)); // Type indicator for the exception being thrown. @@ -615,13 +633,14 @@ fn codegen_gnu_try( let tydesc = bx.const_null(bx.type_i8p()); catch.add_clause(vals, tydesc); let ptr = catch.extract_value(vals, 0); - catch.call(catch_func, &[data, ptr], None); + let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + catch.call(catch_ty, catch_func, &[data, ptr], None); catch.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -636,7 +655,7 @@ fn codegen_emcc_try( catch_func: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(bx, &mut |mut bx| { + let (llty, llfn) = get_rust_try_fn(bx, &mut |mut bx| { // Codegens the shims described above: // // bx: @@ -660,7 +679,8 @@ fn codegen_emcc_try( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - bx.invoke(try_func, &[data], then.llbb(), catch.llbb(), None); + let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + bx.invoke(try_func_ty, try_func, &[data], then.llbb(), catch.llbb(), None); then.ret(bx.const_i32(0)); // Type indicator for the exception being thrown. @@ -677,8 +697,7 @@ fn codegen_emcc_try( let selector = catch.extract_value(vals, 1); // Check if the typeid we got is the one for a Rust panic. - let llvm_eh_typeid_for = bx.get_intrinsic("llvm.eh.typeid.for"); - let rust_typeid = catch.call(llvm_eh_typeid_for, &[tydesc], None); + let rust_typeid = catch.call_intrinsic("llvm.eh.typeid.for", &[tydesc]); let is_rust_panic = catch.icmp(IntPredicate::IntEQ, selector, rust_typeid); let is_rust_panic = catch.zext(is_rust_panic, bx.type_bool()); @@ -686,21 +705,30 @@ fn codegen_emcc_try( // create an alloca and pass a pointer to that. 
let ptr_align = bx.tcx().data_layout.pointer_align.abi; let i8_align = bx.tcx().data_layout.i8_align.abi; - let catch_data = - catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align); - let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]); + let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false); + let catch_data = catch.alloca(catch_data_type, ptr_align); + let catch_data_0 = catch.inbounds_gep( + catch_data_type, + catch_data, + &[bx.const_usize(0), bx.const_usize(0)], + ); catch.store(ptr, catch_data_0, ptr_align); - let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]); + let catch_data_1 = catch.inbounds_gep( + catch_data_type, + catch_data, + &[bx.const_usize(0), bx.const_usize(1)], + ); catch.store(is_rust_panic, catch_data_1, i8_align); let catch_data = catch.bitcast(catch_data, bx.type_i8p()); - catch.call(catch_func, &[data, catch_data], None); + let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + catch.call(catch_ty, catch_func, &[data, catch_data], None); catch.ret(bx.const_i32(1)); }); // Note that no invoke is used here because by definition this function // can't panic (that's what it's catching). - let ret = bx.call(llfn, &[try_func, data, catch_func], None); + let ret = bx.call(llty, llfn, &[try_func, data, catch_func], None); let i32_align = bx.tcx().data_layout.i32_align.abi; bx.store(ret, dest, i32_align); } @@ -712,9 +740,10 @@ fn gen_fn<'ll, 'tcx>( name: &str, rust_fn_sig: ty::PolyFnSig<'tcx>, codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>), -) -> &'ll Value { - let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]); - let llfn = cx.declare_fn(name, &fn_abi); +) -> (&'ll Type, &'ll Value) { + let fn_abi = cx.fn_abi_of_fn_ptr(rust_fn_sig, ty::List::empty()); + let llty = fn_abi.llvm_type(cx); + let llfn = cx.declare_fn(name, fn_abi); cx.set_frame_pointer_type(llfn); cx.apply_target_cpu_attr(llfn); // FIXME(eddyb) find a nicer way to do this. @@ -722,7 +751,7 @@ fn gen_fn<'ll, 'tcx>( let llbb = Builder::append_block(cx, llfn, "entry-block"); let bx = Builder::build(cx, llbb); codegen(bx); - llfn + (llty, llfn) } // Helper function used to get a handle to the `__rust_try` function used to @@ -732,7 +761,7 @@ fn gen_fn<'ll, 'tcx>( fn get_rust_try_fn<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>), -) -> &'ll Value { +) -> (&'ll Type, &'ll Value) { if let Some(llfn) = cx.rust_try_fn.get() { return llfn; } @@ -818,7 +847,6 @@ fn generic_simd_intrinsic( let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), callee_ty.fn_sig(tcx)); let arg_tys = sig.inputs(); - let name_str = &*name.as_str(); if name == sym::simd_select_bitmask { let in_ty = arg_tys[0]; @@ -892,13 +920,30 @@ fn generic_simd_intrinsic( )); } - if let Some(stripped) = name_str.strip_prefix("simd_shuffle") { - let n: u64 = stripped.parse().unwrap_or_else(|_| { - span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") - }); + if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") { + // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer. + // If there is no suffix, use the index array length. + let n: u64 = if stripped.is_empty() { + // Make sure this is actually an array, since typeck only checks the length-suffixed + // version of this intrinsic. 
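+            // For example (illustrative, not part of this change):
+            // `simd_shuffle4(a, b, IDX)` carries the lane count in its name,
+            // while the suffix-less `simd_shuffle(a, b, IDX)` form gets it
+            // from the length of `IDX: [u32; N]`.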
+ match args[2].layout.ty.kind() { + ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { + len.try_eval_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else(|| { + span_bug!(span, "could not evaluate shuffle index array length") + }) + } + _ => return_error!( + "simd_shuffle index must be an array of `u32`, got `{}`", + args[2].layout.ty + ), + } + } else { + stripped.parse().unwrap_or_else(|_| { + span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") + }) + }; require_simd!(ret_ty, "return"); - let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); require!( out_len == n, @@ -1006,7 +1051,7 @@ fn generic_simd_intrinsic( // vector mask and returns an unsigned integer containing the most // significant bit (MSB) of each lane. - // If the vector has less than 8 lanes, an u8 is returned with zeroed + // If the vector has less than 8 lanes, a u8 is returned with zeroed // trailing bits. let expected_int_bits = in_len.max(8); match ret_ty.kind() { @@ -1114,8 +1159,9 @@ fn generic_simd_intrinsic( _ => return_error!("unrecognized intrinsic `{}`", name), }; let llvm_name = &format!("llvm.{0}.v{1}{2}", intr_name, in_len, elem_ty_str); - let f = bx.declare_cfn(&llvm_name, llvm::UnnamedAddr::No, fn_ty); - let c = bx.call(f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None); + let f = bx.declare_cfn(llvm_name, llvm::UnnamedAddr::No, fn_ty); + let c = + bx.call(fn_ty, f, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None); Ok(c) } @@ -1292,15 +1338,13 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = bx.declare_cfn( - &llvm_intrinsic, - llvm::UnnamedAddr::No, - bx.type_func( - &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty], - llvm_elem_vec_ty, - ), + let fn_ty = bx.type_func( + &[llvm_pointer_vec_ty, alignment_ty, mask_ty, llvm_elem_vec_ty], + llvm_elem_vec_ty, ); - let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); + let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); + let v = + bx.call(fn_ty, f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); return Ok(v); } @@ -1422,12 +1466,11 @@ fn generic_simd_intrinsic( let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = bx.declare_cfn( - &llvm_intrinsic, - llvm::UnnamedAddr::No, - bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t), - ); - let v = bx.call(f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None); + let fn_ty = + bx.type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t); + let f = bx.declare_cfn(&llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); + let v = + bx.call(fn_ty, f, &[args[0].immediate(), args[1].immediate(), alignment, mask], None); return Ok(v); } @@ -1749,12 +1792,9 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, ); let vec_ty = bx.cx.type_vector(elem_ty, in_len as u64); - let f = bx.declare_cfn( - &llvm_intrinsic, - llvm::UnnamedAddr::No, - bx.type_func(&[vec_ty, vec_ty], vec_ty), - ); - let v = bx.call(f, &[lhs, rhs], None); + let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty); + let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); + let v = bx.call(fn_ty, f, &[lhs, rhs], None); return Ok(v); } diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 
aa4db1622b2..8f4d79e7147 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -27,8 +27,8 @@ use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::{CodegenResults, CompiledModule}; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{ErrorReported, FatalError, Handler}; +use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; -use rustc_middle::middle::cstore::EncodedMetadata; use rustc_middle::ty::TyCtxt; use rustc_session::config::{OptLevel, OutputFilenames, PrintRequest}; use rustc_session::Session; @@ -92,11 +92,12 @@ impl ExtraBackendMethods for LlvmCodegenBackend { fn codegen_allocator<'tcx>( &self, tcx: TyCtxt<'tcx>, - mods: &mut ModuleLlvm, + module_llvm: &mut ModuleLlvm, + module_name: &str, kind: AllocatorKind, has_alloc_error_handler: bool, ) { - unsafe { allocator::codegen(tcx, mods, kind, has_alloc_error_handler) } + unsafe { allocator::codegen(tcx, module_llvm, module_name, kind, has_alloc_error_handler) } } fn compile_codegen_unit( &self, @@ -210,9 +211,16 @@ impl CodegenBackend for LlvmCodegenBackend { match req { PrintRequest::RelocationModels => { println!("Available relocation models:"); - for name in - &["static", "pic", "dynamic-no-pic", "ropi", "rwpi", "ropi-rwpi", "default"] - { + for name in &[ + "static", + "pic", + "pie", + "dynamic-no-pic", + "ropi", + "rwpi", + "ropi-rwpi", + "default", + ] { println!(" {}", name); } println!(); @@ -331,7 +339,7 @@ impl ModuleLlvm { unsafe { let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); let llmod_raw = back::lto::parse_module(llcx, name, buffer, handler)?; - let tm_factory_config = TargetMachineFactoryConfig::new(&cgcx, name.to_str().unwrap()); + let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap()); let tm = match (cgcx.tm_factory)(tm_factory_config) { Ok(m) => m, Err(e) => { @@ -352,8 +360,8 @@ impl ModuleLlvm { impl Drop for ModuleLlvm { fn drop(&mut self) { unsafe { - llvm::LLVMContextDispose(&mut *(self.llcx as *mut _)); llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _)); + llvm::LLVMContextDispose(&mut *(self.llcx as *mut _)); } } } diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs index ccd3e42e458..36aa022d746 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs @@ -6,7 +6,8 @@ pub use self::OptimizationDiagnosticKind::*; use crate::value::Value; use libc::c_uint; -use super::{DiagnosticInfo, Twine}; +use super::{DiagnosticInfo, SMDiagnostic}; +use rustc_span::InnerSpan; #[derive(Copy, Clone)] pub enum OptimizationDiagnosticKind { @@ -86,36 +87,91 @@ impl OptimizationDiagnostic<'ll> { } } -#[derive(Copy, Clone)] -pub struct InlineAsmDiagnostic<'ll> { +pub struct SrcMgrDiagnostic { + pub level: super::DiagnosticLevel, + pub message: String, + pub source: Option<(String, Vec<InnerSpan>)>, +} + +impl SrcMgrDiagnostic { + pub unsafe fn unpack(diag: &SMDiagnostic) -> SrcMgrDiagnostic { + // Recover the post-substitution assembly code from LLVM for better + // diagnostics. 
+ let mut have_source = false; + let mut buffer = String::new(); + let mut level = super::DiagnosticLevel::Error; + let mut loc = 0; + let mut ranges = [0; 8]; + let mut num_ranges = ranges.len() / 2; + let message = super::build_string(|message| { + buffer = super::build_string(|buffer| { + have_source = super::LLVMRustUnpackSMDiagnostic( + diag, + message, + buffer, + &mut level, + &mut loc, + ranges.as_mut_ptr(), + &mut num_ranges, + ); + }) + .expect("non-UTF8 inline asm"); + }) + .expect("non-UTF8 SMDiagnostic"); + + SrcMgrDiagnostic { + message, + level, + source: have_source.then(|| { + let mut spans = vec![InnerSpan::new(loc as usize, loc as usize)]; + for i in 0..num_ranges { + spans.push(InnerSpan::new(ranges[i * 2] as usize, ranges[i * 2 + 1] as usize)); + } + (buffer, spans) + }), + } + } +} + +#[derive(Clone)] +pub struct InlineAsmDiagnostic { pub level: super::DiagnosticLevel, pub cookie: c_uint, - pub message: &'ll Twine, - pub instruction: Option<&'ll Value>, + pub message: String, + pub source: Option<(String, Vec<InnerSpan>)>, } -impl InlineAsmDiagnostic<'ll> { - unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self { +impl InlineAsmDiagnostic { + unsafe fn unpackInlineAsm(di: &'ll DiagnosticInfo) -> Self { let mut cookie = 0; let mut message = None; - let mut instruction = None; let mut level = super::DiagnosticLevel::Error; - super::LLVMRustUnpackInlineAsmDiagnostic( - di, - &mut level, - &mut cookie, - &mut message, - &mut instruction, - ); + super::LLVMRustUnpackInlineAsmDiagnostic(di, &mut level, &mut cookie, &mut message); - InlineAsmDiagnostic { level, cookie, message: message.unwrap(), instruction } + InlineAsmDiagnostic { + level, + cookie, + message: super::twine_to_string(message.unwrap()), + source: None, + } + } + + unsafe fn unpackSrcMgr(di: &'ll DiagnosticInfo) -> Self { + let mut cookie = 0; + let smdiag = SrcMgrDiagnostic::unpack(super::LLVMRustGetSMDiagnostic(di, &mut cookie)); + InlineAsmDiagnostic { + level: smdiag.level, + cookie, + message: smdiag.message, + source: smdiag.source, + } } } pub enum Diagnostic<'ll> { Optimization(OptimizationDiagnostic<'ll>), - InlineAsm(InlineAsmDiagnostic<'ll>), + InlineAsm(InlineAsmDiagnostic), PGO(&'ll DiagnosticInfo), Linker(&'ll DiagnosticInfo), Unsupported(&'ll DiagnosticInfo), @@ -130,7 +186,7 @@ impl Diagnostic<'ll> { let kind = super::LLVMRustGetDiagInfoKind(di); match kind { - Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpack(di)), + Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpackInlineAsm(di)), Dk::OptimizationRemark => { Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di)) @@ -162,6 +218,8 @@ impl Diagnostic<'ll> { Dk::Linker => Linker(di), Dk::Unsupported => Unsupported(di), + Dk::SrcMgr => InlineAsm(InlineAsmDiagnostic::unpackSrcMgr(di)), + _ => UnknownDiagnostic(di), } } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 68d566cca09..436d906827b 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -490,6 +490,7 @@ pub enum DiagnosticKind { PGOProfile, Linker, Unsupported, + SrcMgr, } /// LLVMRustDiagnosticLevel @@ -788,7 +789,7 @@ pub mod coverageinfo { start_line, start_col, end_line, - end_col: ((1 as u32) << 31) | end_col, + end_col: (1_u32 << 31) | end_col, kind: RegionKind::GapRegion, } } @@ -1011,7 +1012,8 @@ extern "C" { pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value; // Constant expressions - pub fn LLVMConstInBoundsGEP( + 
pub fn LLVMRustConstInBoundsGEP2( + ty: &'a Type, ConstantVal: &'a Value, ConstantIndices: *const &'a Value, NumIndices: c_uint, @@ -1154,6 +1156,7 @@ extern "C" { ) -> &'a Value; pub fn LLVMRustBuildInvoke( B: &Builder<'a>, + Ty: &'a Type, Fn: &'a Value, Args: *const &'a Value, NumArgs: c_uint, @@ -1394,22 +1397,25 @@ extern "C" { pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value; - pub fn LLVMBuildGEP( + pub fn LLVMBuildGEP2( B: &Builder<'a>, + Ty: &'a Type, Pointer: &'a Value, Indices: *const &'a Value, NumIndices: c_uint, Name: *const c_char, ) -> &'a Value; - pub fn LLVMBuildInBoundsGEP( + pub fn LLVMBuildInBoundsGEP2( B: &Builder<'a>, + Ty: &'a Type, Pointer: &'a Value, Indices: *const &'a Value, NumIndices: c_uint, Name: *const c_char, ) -> &'a Value; - pub fn LLVMBuildStructGEP( + pub fn LLVMBuildStructGEP2( B: &Builder<'a>, + Ty: &'a Type, Pointer: &'a Value, Idx: c_uint, Name: *const c_char, @@ -1522,6 +1528,7 @@ extern "C" { pub fn LLVMRustGetInstrProfIncrementIntrinsic(M: &Module) -> &'a Value; pub fn LLVMRustBuildCall( B: &Builder<'a>, + Ty: &'a Type, Fn: &'a Value, Args: *const &'a Value, NumArgs: c_uint, @@ -2258,13 +2265,17 @@ extern "C" { level_out: &mut DiagnosticLevel, cookie_out: &mut c_uint, message_out: &mut Option<&'a Twine>, - instruction_out: &mut Option<&'a Value>, ); #[allow(improper_ctypes)] pub fn LLVMRustWriteDiagnosticInfoToString(DI: &DiagnosticInfo, s: &RustString); pub fn LLVMRustGetDiagInfoKind(DI: &DiagnosticInfo) -> DiagnosticKind; + pub fn LLVMRustGetSMDiagnostic( + DI: &'a DiagnosticInfo, + cookie_out: &mut c_uint, + ) -> &'a SMDiagnostic; + pub fn LLVMRustSetInlineAsmDiagnosticHandler( C: &Context, H: InlineAsmDiagHandler, @@ -2366,12 +2377,8 @@ extern "C" { len: usize, out_len: &mut usize, ) -> *const u8; - pub fn LLVMRustThinLTOGetDICompileUnit( - M: &Module, - CU1: &mut *mut c_void, - CU2: &mut *mut c_void, - ); - pub fn LLVMRustThinLTOPatchDICompileUnit(M: &Module, CU: *mut c_void); + pub fn LLVMRustLTOGetDICompileUnit(M: &Module, CU1: &mut *mut c_void, CU2: &mut *mut c_void); + pub fn LLVMRustLTOPatchDICompileUnit(M: &Module, CU: *mut c_void); pub fn LLVMRustLinkerNew(M: &'a Module) -> &'a mut Linker<'a>; pub fn LLVMRustLinkerAdd( diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index cb9c6269b66..c5deb11edd0 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -89,13 +89,14 @@ unsafe fn configure_llvm(sess: &Session) { add("-generate-arange-section", false); } - // FIXME(nagisa): disable the machine outliner by default in LLVM versions 11, where it was - // introduced and up. + // Disable the machine outliner by default in LLVM versions 11 and LLVM + // version 12, where it leads to miscompilation. // - // This should remain in place until https://reviews.llvm.org/D103167 is fixed. If LLVM - // has been upgraded since, consider adjusting the version check below to contain an upper - // bound. 
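The `*GEP2`, `LLVMRustBuildCall`, and `LLVMRustBuildInvoke` signature changes above all thread an explicit `Ty` parameter through to LLVM. This is groundwork for opaque pointers: once a pointer value no longer carries its pointee type, the builder cannot recover the element or function type from the pointer operand (note the matching `element_type` restriction in type_.rs later in this diff), so the caller must supply it from layout information. A stand-alone model of the caller-side shape (the `Type`/`Value` structs here are illustrative stand-ins, not the real bindings):

    #[derive(Clone, Copy, Debug)]
    struct Type(&'static str);
    #[derive(Clone, Copy, Debug)]
    struct Value(&'static str);

    // Post-change shape: the element type is an explicit argument rather than
    // being read back off the pointer with LLVMGetElementType.
    fn inbounds_gep(elem_ty: Type, ptr: Value, indices: &[i64]) -> Value {
        println!("getelementptr inbounds {}, {:?}, {:?}", elem_ty.0, ptr, indices);
        Value("%gep")
    }

    fn main() {
        // A byte-wise pointer bump, as the va_arg lowering later in this diff does
        // with bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]).
        let _next = inbounds_gep(Type("i8"), Value("%addr"), &[8]);
    }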
- if llvm_util::get_version() >= (11, 0, 0) { + // Ref: + // - https://github.com/rust-lang/rust/issues/85351 + // - https://reviews.llvm.org/D103167 + let llvm_version = llvm_util::get_version(); + if llvm_version >= (11, 0, 0) && llvm_version < (13, 0, 0) { add("-enable-machine-outliner=never", false); } @@ -165,25 +166,32 @@ pub fn time_trace_profiler_finish(file_name: &str) { // Though note that Rust can also be build with an external precompiled version of LLVM // which might lead to failures if the oldest tested / supported LLVM version // doesn't yet support the relevant intrinsics -pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str { +pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> Vec<&'a str> { let arch = if sess.target.arch == "x86_64" { "x86" } else { &*sess.target.arch }; match (arch, s) { - ("x86", "pclmulqdq") => "pclmul", - ("x86", "rdrand") => "rdrnd", - ("x86", "bmi1") => "bmi", - ("x86", "cmpxchg16b") => "cx16", - ("x86", "avx512vaes") => "vaes", - ("x86", "avx512gfni") => "gfni", - ("x86", "avx512vpclmulqdq") => "vpclmulqdq", - ("aarch64", "fp") => "fp-armv8", - ("aarch64", "fp16") => "fullfp16", - ("aarch64", "fhm") => "fp16fml", - ("aarch64", "rcpc2") => "rcpc-immo", - ("aarch64", "dpb") => "ccpp", - ("aarch64", "dpb2") => "ccdp", - ("aarch64", "frintts") => "fptoint", - ("aarch64", "fcma") => "complxnum", - (_, s) => s, + ("x86", "sse4.2") => { + if get_version() >= (14, 0, 0) { + vec!["sse4.2", "crc32"] + } else { + vec!["sse4.2"] + } + } + ("x86", "pclmulqdq") => vec!["pclmul"], + ("x86", "rdrand") => vec!["rdrnd"], + ("x86", "bmi1") => vec!["bmi"], + ("x86", "cmpxchg16b") => vec!["cx16"], + ("x86", "avx512vaes") => vec!["vaes"], + ("x86", "avx512gfni") => vec!["gfni"], + ("x86", "avx512vpclmulqdq") => vec!["vpclmulqdq"], + ("aarch64", "fp") => vec!["fp-armv8"], + ("aarch64", "fp16") => vec!["fullfp16"], + ("aarch64", "fhm") => vec!["fp16fml"], + ("aarch64", "rcpc2") => vec!["rcpc-immo"], + ("aarch64", "dpb") => vec!["ccpp"], + ("aarch64", "dpb2") => vec!["ccdp"], + ("aarch64", "frintts") => vec!["fptoint"], + ("aarch64", "fcma") => vec!["complxnum"], + (_, s) => vec![s], } } @@ -197,9 +205,13 @@ pub fn target_features(sess: &Session) -> Vec<Symbol> { }, ) .filter(|feature| { - let llvm_feature = to_llvm_feature(sess, feature); - let cstr = CString::new(llvm_feature).unwrap(); - unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } + for llvm_feature in to_llvm_feature(sess, feature) { + let cstr = CString::new(llvm_feature).unwrap(); + if unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) } { + return true; + } + } + false }) .map(|feature| Symbol::intern(feature)) .collect() @@ -252,12 +264,19 @@ fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) { let mut rustc_target_features = supported_target_features(sess) .iter() .filter_map(|(feature, _gate)| { - let llvm_feature = to_llvm_feature(sess, *feature); - // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings. - target_features.binary_search_by_key(&llvm_feature, |(f, _d)| *f).ok().map(|index| { - let (_f, desc) = target_features.remove(index); - (*feature, desc) - }) + for llvm_feature in to_llvm_feature(sess, *feature) { + // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings. 
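`to_llvm_feature` now returns a `Vec` so that a single Rust target feature can expand to several LLVM features. The first case that needs this is `sse4.2`: on LLVM 14 and later, the CRC32 instructions are split into their own `crc32` feature, which must be enabled alongside `sse4.2`. A trimmed, compilable model of the mapping (the version tuple is a parameter here; the real function queries `get_version()`):

    fn to_llvm_feature<'a>(arch: &str, s: &'a str, llvm_version: (u32, u32, u32)) -> Vec<&'a str> {
        match (arch, s) {
            // LLVM 14 splits the CRC32 instructions out of SSE4.2.
            ("x86", "sse4.2") if llvm_version >= (14, 0, 0) => vec!["sse4.2", "crc32"],
            ("x86", "sse4.2") => vec!["sse4.2"],
            ("x86", "pclmulqdq") => vec!["pclmul"],
            ("aarch64", "fp16") => vec!["fullfp16"],
            (_, s) => vec![s],
        }
    }

    fn main() {
        assert_eq!(to_llvm_feature("x86", "sse4.2", (14, 0, 0)), ["sse4.2", "crc32"]);
        assert_eq!(to_llvm_feature("x86", "sse4.2", (13, 0, 0)), ["sse4.2"]);
    }

The callers adapt accordingly: `target_features` now reports a Rust feature as available if any one of its LLVM expansions is supported, which is what the new for-loop in the filter above does.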
+ match target_features.binary_search_by_key(&llvm_feature, |(f, _d)| (*f)).ok().map( + |index| { + let (_f, desc) = target_features.remove(index); + (*feature, desc) + }, + ) { + Some(v) => return Some(v), + None => {} + } + } + None }) .collect::<Vec<_>>(); rustc_target_features.extend_from_slice(&[( @@ -279,7 +298,7 @@ fn print_target_features(sess: &Session, tm: &llvm::TargetMachine) { for (feature, desc) in &target_features { println!(" {1:0$} - {2}.", max_feature_len, feature, desc); } - if target_features.len() == 0 { + if target_features.is_empty() { println!(" Target features listing is not supported by this LLVM version."); } println!("\nUse +feature to enable a feature, or -feature to disable it."); @@ -365,37 +384,37 @@ pub fn llvm_global_features(sess: &Session) -> Vec<String> { features_string }; - features.extend(features_string.split(",").map(String::from)); + features.extend(features_string.split(',').map(String::from)); } Some(_) | None => {} }; let filter = |s: &str| { if s.is_empty() { - return None; + return vec![]; } - let feature = if s.starts_with("+") || s.starts_with("-") { + let feature = if s.starts_with('+') || s.starts_with('-') { &s[1..] } else { - return Some(s.to_string()); + return vec![s.to_string()]; }; // Rustc-specific feature requests like `+crt-static` or `-crt-static` // are not passed down to LLVM. if RUSTC_SPECIFIC_FEATURES.contains(&feature) { - return None; + return vec![]; } // ... otherwise though we run through `to_llvm_feature` when // passing requests down to LLVM. This means that all in-language // features also work on the command line instead of having two // different names when the LLVM name and the Rust name differ. - Some(format!("{}{}", &s[..1], to_llvm_feature(sess, feature))) + to_llvm_feature(sess, feature).iter().map(|f| format!("{}{}", &s[..1], f)).collect() }; // Features implied by an implicit or explicit `--target`.
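The same expansion has a knock-on effect in `llvm_global_features`: the `+`/`-` prefix of the original request has to be replicated onto every LLVM name the Rust feature expands to, which is what the rewritten closure's final `map`/`collect` does. In isolation (a hypothetical `expand` helper, with the mapping table cut down to the single `sse4.2` entry):

    fn expand(request: &str) -> Vec<String> {
        // Split off the +/- prefix; requests without one pass through unchanged.
        let feature = match request.strip_prefix(&['+', '-'][..]) {
            Some(f) => f,
            None => return vec![request.to_string()],
        };
        let sign = &request[..1];
        let llvm_names: Vec<&str> =
            if feature == "sse4.2" { vec!["sse4.2", "crc32"] } else { vec![feature] };
        llvm_names.iter().map(|f| format!("{}{}", sign, f)).collect()
    }

    fn main() {
        assert_eq!(expand("+sse4.2"), ["+sse4.2", "+crc32"]);
        assert_eq!(expand("-bmi1"), ["-bmi1"]);
    }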
- features.extend(sess.target.features.split(',').filter_map(&filter)); + features.extend(sess.target.features.split(',').flat_map(&filter)); // -Ctarget-features - features.extend(sess.opts.cg.target_feature.split(',').filter_map(&filter)); + features.extend(sess.opts.cg.target_feature.split(',').flat_map(&filter)); features } diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs index 93456443aa0..88498cf47d8 100644 --- a/compiler/rustc_codegen_llvm/src/mono_item.rs +++ b/compiler/rustc_codegen_llvm/src/mono_item.rs @@ -1,4 +1,3 @@ -use crate::abi::FnAbi; use crate::attributes; use crate::base; use crate::context::CodegenCx; @@ -8,10 +7,9 @@ use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; pub use rustc_middle::mir::mono::MonoItem; use rustc_middle::mir::mono::{Linkage, Visibility}; -use rustc_middle::ty::layout::FnAbiExt; +use rustc_middle::ty::layout::{FnAbiOf, LayoutOf}; use rustc_middle::ty::{self, Instance, TypeFoldable}; use rustc_session::config::CrateType; -use rustc_target::abi::LayoutOf; use rustc_target::spec::RelocModel; use tracing::debug; @@ -54,11 +52,11 @@ impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> { ) { assert!(!instance.substs.needs_infer()); - let fn_abi = FnAbi::of_instance(self, instance, &[]); - let lldecl = self.declare_fn(symbol_name, &fn_abi); + let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); + let lldecl = self.declare_fn(symbol_name, fn_abi); unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); - base::set_link_section(lldecl, &attrs); + base::set_link_section(lldecl, attrs); if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR { llvm::SetUniqueComdat(self.llmod, lldecl); } @@ -135,11 +133,18 @@ impl CodegenCx<'ll, 'tcx> { return false; } + // Match clang by only supporting COFF and ELF for now. + if self.tcx.sess.target.is_like_osx { + return false; + } + // Static relocation model should force copy relocations everywhere. if self.tcx.sess.relocation_model() == RelocModel::Static { return true; } - return false; + // With pie relocation model calls of functions defined in the translation + // unit can use copy relocations. 
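With the filter closure returning `Vec` instead of `Option`, `filter_map` no longer applies, hence the two `flat_map` calls just above: an empty `Vec` contributes nothing to the flattened stream, playing the role `None` used to. In miniature:

    fn main() {
        let filter = |s: &str| -> Vec<String> {
            if s.is_empty() { vec![] } else { vec![s.to_string()] }
        };
        // Each Vec is flattened into the output; empty ones vanish,
        // exactly as None did under filter_map.
        let features: Vec<String> = "a,,b".split(',').flat_map(filter).collect();
        assert_eq!(features, ["a", "b"]);
    }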
+ self.tcx.sess.relocation_model() == RelocModel::Pie && !is_declaration } } diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 8fd0caae479..2ae0a08f192 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -203,7 +203,11 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn element_type(&self, ty: &'ll Type) -> &'ll Type { - unsafe { llvm::LLVMGetElementType(ty) } + match self.type_kind(ty) { + TypeKind::Array | TypeKind::Vector => unsafe { llvm::LLVMGetElementType(ty) }, + TypeKind::Pointer => bug!("element_type is not supported for opaque pointers"), + other => bug!("element_type called on unsupported type {:?}", other), + } } fn vector_length(&self, ty: &'ll Type) -> usize { @@ -244,7 +248,7 @@ impl Type { } fn ptr_to(&self, address_space: AddressSpace) -> &Type { - unsafe { llvm::LLVMPointerType(&self, address_space.0) } + unsafe { llvm::LLVMPointerType(self, address_space.0) } } } @@ -262,7 +266,7 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { layout.is_llvm_scalar_pair() } fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 { - layout.llvm_field_index(index) + layout.llvm_field_index(self, index) } fn scalar_pair_element_backend_type( &self, @@ -275,6 +279,9 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { ty.llvm_type(self) } + fn fn_decl_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type { + fn_abi.llvm_type(self) + } fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type { fn_abi.ptr_to_llvm_type(self) } diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 0876907e119..f8c919ec2aa 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -1,14 +1,15 @@ -use crate::abi::FnAbi; use crate::common::*; +use crate::context::TypeLowering; use crate::type_::Type; use rustc_codegen_ssa::traits::*; use rustc_middle::bug; -use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout}; -use rustc_middle::ty::print::with_no_trimmed_paths; +use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; +use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; use rustc_middle::ty::{self, Ty, TypeFoldable}; use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape}; use rustc_target::abi::{Int, Pointer, F32, F64}; -use rustc_target::abi::{LayoutOf, PointeeInfo, Scalar, Size, TyAndLayoutMethods, Variants}; +use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants}; +use smallvec::{smallvec, SmallVec}; use tracing::debug; use std::fmt::Write; @@ -17,10 +18,11 @@ fn uncached_llvm_type<'a, 'tcx>( cx: &CodegenCx<'a, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>, + field_remapping: &mut Option<SmallVec<[u32; 4]>>, ) -> &'a Type { match layout.abi { Abi::Scalar(_) => bug!("handled elsewhere"), - Abi::Vector { ref element, count } => { + Abi::Vector { element, count } => { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); return cx.type_vector(element, count); } @@ -41,7 +43,8 @@ fn uncached_llvm_type<'a, 'tcx>( // in problematically distinct types due to HRTB and subtyping (see #47638). // ty::Dynamic(..) | ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) 
| ty::Str => { - let mut name = with_no_trimmed_paths(|| layout.ty.to_string()); + let mut name = + with_no_visible_paths(|| with_no_trimmed_paths(|| layout.ty.to_string())); if let (&ty::Adt(def, _), &Variants::Single { index }) = (layout.ty.kind(), &layout.variants) { @@ -75,7 +78,8 @@ fn uncached_llvm_type<'a, 'tcx>( FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count), FieldsShape::Arbitrary { .. } => match name { None => { - let (llfields, packed) = struct_llfields(cx, layout); + let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout); + *field_remapping = new_field_remapping; cx.type_struct(&llfields, packed) } Some(ref name) => { @@ -90,7 +94,7 @@ fn uncached_llvm_type<'a, 'tcx>( fn struct_llfields<'a, 'tcx>( cx: &CodegenCx<'a, 'tcx>, layout: TyAndLayout<'tcx>, -) -> (Vec<&'a Type>, bool) { +) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) { debug!("struct_llfields: {:#?}", layout); let field_count = layout.fields.count(); @@ -98,6 +102,7 @@ fn struct_llfields<'a, 'tcx>( let mut offset = Size::ZERO; let mut prev_effective_align = layout.align.abi; let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2); + let mut field_remapping = smallvec![0; field_count]; for i in layout.fields.index_by_increasing_offset() { let target_offset = layout.fields.offset(i as usize); let field = layout.field(cx, i); @@ -116,33 +121,37 @@ fn struct_llfields<'a, 'tcx>( ); assert!(target_offset >= offset); let padding = target_offset - offset; - let padding_align = prev_effective_align.min(effective_field_align); - assert_eq!(offset.align_to(padding_align) + padding, target_offset); - result.push(cx.type_padding_filler(padding, padding_align)); - debug!(" padding before: {:?}", padding); - + if padding != Size::ZERO { + let padding_align = prev_effective_align.min(effective_field_align); + assert_eq!(offset.align_to(padding_align) + padding, target_offset); + result.push(cx.type_padding_filler(padding, padding_align)); + debug!(" padding before: {:?}", padding); + } + field_remapping[i] = result.len() as u32; result.push(field.llvm_type(cx)); offset = target_offset + field.size; prev_effective_align = effective_field_align; } + let padding_used = result.len() > field_count; if !layout.is_unsized() && field_count > 0 { if offset > layout.size { bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset); } let padding = layout.size - offset; - let padding_align = prev_effective_align; - assert_eq!(offset.align_to(padding_align) + padding, layout.size); - debug!( - "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", - padding, offset, layout.size - ); - result.push(cx.type_padding_filler(padding, padding_align)); - assert_eq!(result.len(), 1 + field_count * 2); + if padding != Size::ZERO { + let padding_align = prev_effective_align; + assert_eq!(offset.align_to(padding_align) + padding, layout.size); + debug!( + "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", + padding, offset, layout.size + ); + result.push(cx.type_padding_filler(padding, padding_align)); + } } else { debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size); } - - (result, packed) + let field_remapping = if padding_used { Some(field_remapping) } else { None }; + (result, packed, field_remapping) } impl<'a, 'tcx> CodegenCx<'a, 'tcx> { @@ -168,7 +177,7 @@ pub trait LayoutLlvmExt<'tcx> { fn scalar_llvm_type_at<'a>( &self, cx: &CodegenCx<'a, 'tcx>, - scalar: &Scalar, + scalar: Scalar, offset: Size, ) -> &'a Type; fn 
scalar_pair_element_llvm_type<'a>( @@ -177,7 +186,7 @@ pub trait LayoutLlvmExt<'tcx> { index: usize, immediate: bool, ) -> &'a Type; - fn llvm_field_index(&self, index: usize) -> u64; + fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64; fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>; } @@ -209,7 +218,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { - if let Abi::Scalar(ref scalar) = self.abi { + if let Abi::Scalar(scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) { @@ -222,7 +231,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { ty::Adt(def, _) if def.is_box() => { cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) } - ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])), + ty::FnPtr(sig) => { + cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty())) + } _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO), }; cx.scalar_lltypes.borrow_mut().insert(self.ty, llty); @@ -234,8 +245,8 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { Variants::Single { index } => Some(index), _ => None, }; - if let Some(&llty) = cx.lltypes.borrow().get(&(self.ty, variant_index)) { - return llty; + if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) { + return llty.lltype; } debug!("llvm_type({:#?})", self); @@ -247,6 +258,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { let normal_ty = cx.tcx.erase_regions(self.ty); let mut defer = None; + let mut field_remapping = None; let llty = if self.ty != normal_ty { let mut layout = cx.layout_of(normal_ty); if let Some(v) = variant_index { @@ -254,22 +266,28 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { } layout.llvm_type(cx) } else { - uncached_llvm_type(cx, *self, &mut defer) + uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping) }; debug!("--> mapped {:#?} to llty={:?}", self, llty); - cx.lltypes.borrow_mut().insert((self.ty, variant_index), llty); + cx.type_lowering + .borrow_mut() + .insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping }); if let Some((llty, layout)) = defer { - let (llfields, packed) = struct_llfields(cx, layout); - cx.set_struct_body(llty, &llfields, packed) + let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout); + cx.set_struct_body(llty, &llfields, packed); + cx.type_lowering + .borrow_mut() + .get_mut(&(self.ty, variant_index)) + .unwrap() + .field_remapping = new_field_remapping; } - llty } fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { - if let Abi::Scalar(ref scalar) = self.abi { + if let Abi::Scalar(scalar) = self.abi { if scalar.is_bool() { return cx.type_i1(); } @@ -280,7 +298,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn scalar_llvm_type_at<'a>( &self, cx: &CodegenCx<'a, 'tcx>, - scalar: &Scalar, + scalar: Scalar, offset: Size, ) -> &'a Type { match scalar.value { @@ -320,7 +338,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { } let (a, b) = match self.abi { - Abi::ScalarPair(ref a, ref b) => (a, b), + Abi::ScalarPair(a, b) => (a, b), _ => 
bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self), }; let scalar = [a, b][index]; @@ -340,7 +358,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { self.scalar_llvm_type_at(cx, scalar, offset) } - fn llvm_field_index(&self, index: usize) -> u64 { + fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 { match self.abi { Abi::Scalar(_) | Abi::ScalarPair(..) => { bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self) @@ -354,16 +372,37 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { FieldsShape::Array { .. } => index as u64, - FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2, + FieldsShape::Arbitrary { .. } => { + let variant_index = match self.variants { + Variants::Single { index } => Some(index), + _ => None, + }; + + // Look up llvm field if indexes do not match memory order due to padding. If + // `field_remapping` is `None` no padding was used and the llvm field index + // matches the memory index. + match cx.type_lowering.borrow().get(&(self.ty, variant_index)) { + Some(TypeLowering { field_remapping: Some(ref remap), .. }) => { + remap[index] as u64 + } + Some(_) => self.fields.memory_index(index) as u64, + None => { + bug!("TyAndLayout::llvm_field_index({:?}): type info not found", self) + } + } + } } } + // FIXME(eddyb) this having the same name as `TyAndLayout::pointee_info_at` + // (the inherent method, which is lacking this caching logic) can result in + // the uncached version being called - not wrong, but potentially inefficient. fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> { if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) { return pointee; } - let result = Ty::pointee_info_at(*self, cx, offset); + let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset); cx.pointee_infos.borrow_mut().insert((self.ty, offset), result); result diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 9df1bd7d1d9..591f659f11b 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -7,9 +7,9 @@ use rustc_codegen_ssa::{ common::IntPredicate, traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}, }; -use rustc_middle::ty::layout::HasTyCtxt; +use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; use rustc_middle::ty::Ty; -use rustc_target::abi::{Align, Endian, HasDataLayout, LayoutOf, Size}; +use rustc_target::abi::{Align, Endian, HasDataLayout, Size}; fn round_pointer_up_to_alignment( bx: &mut Builder<'a, 'll, 'tcx>, @@ -50,12 +50,12 @@ fn emit_direct_ptr_va_arg( let aligned_size = size.align_to(slot_size).bytes() as i32; let full_direct_size = bx.cx().const_i32(aligned_size); - let next = bx.inbounds_gep(addr, &[full_direct_size]); + let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]); bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi); if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); - let adjusted = bx.inbounds_gep(addr, &[adjusted_size]); + let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]); (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) } else { (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align) @@ -98,6 +98,8 @@ fn emit_aapcs_va_arg( // Implementation of the AAPCS64 calling convention for va_args 
see // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst let va_list_addr = list.immediate(); + let va_list_layout = list.deref(bx.cx).layout; + let va_list_ty = va_list_layout.llvm_type(bx); let layout = bx.cx.layout_of(target_ty); let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg"); @@ -109,19 +111,21 @@ fn emit_aapcs_va_arg( let gr_type = target_ty.is_any_ptr() || target_ty.is_integral(); let (reg_off, reg_top_index, slot_size) = if gr_type { - let gr_offs = bx.struct_gep(va_list_addr, 7); + let gr_offs = + bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3)); let nreg = (layout.size.bytes() + 7) / 8; - (gr_offs, 3, nreg * 8) + (gr_offs, va_list_layout.llvm_field_index(bx.cx, 1), nreg * 8) } else { - let vr_off = bx.struct_gep(va_list_addr, 9); + let vr_off = + bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 4)); let nreg = (layout.size.bytes() + 15) / 16; - (vr_off, 5, nreg * 16) + (vr_off, va_list_layout.llvm_field_index(bx.cx, 2), nreg * 16) }; // if the offset >= 0 then the value will be on the stack let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align); let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero); - bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb()); + bx.cond_br(use_stack, on_stack.llbb(), maybe_reg.llbb()); // The value at this point might be in a register, but there is a chance that // it could be on the stack so we have to update the offset and then check @@ -138,33 +142,33 @@ // Check to see if we have overflowed the registers as a result of this. // If we have then we need to use the stack for this value let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero); - maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb()); + maybe_reg.cond_br(use_stack, on_stack.llbb(), in_reg.llbb()); let top_type = bx.type_i8p(); - let top = in_reg.struct_gep(va_list_addr, reg_top_index); + let top = in_reg.struct_gep(va_list_ty, va_list_addr, reg_top_index); let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi); // reg_value = *(@top + reg_off_v); - let mut reg_addr = in_reg.gep(top, &[reg_off_v]); + let mut reg_addr = in_reg.gep(bx.type_i8(), top, &[reg_off_v]); if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size { // On big-endian systems the value is right-aligned in its slot. let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32); - reg_addr = in_reg.gep(reg_addr, &[offset]); + reg_addr = in_reg.gep(bx.type_i8(), reg_addr, &[offset]); } let reg_type = layout.llvm_type(bx); let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type)); let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi); - in_reg.br(&end.llbb()); + in_reg.br(end.llbb()); // On Stack block let stack_value = emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true); - on_stack.br(&end.llbb()); + on_stack.br(end.llbb()); let val = end.phi( layout.immediate_llvm_type(bx), &[reg_value, stack_value], - &[&in_reg.llbb(), &on_stack.llbb()], + &[in_reg.llbb(), on_stack.llbb()], ); *bx = end;
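For reference, the AAPCS64 `va_list` that `emit_aapcs_va_arg` walks is the five-field structure from the Arm ABI document linked above; the `llvm_field_index(bx.cx, n)` calls translate these source-order field numbers into LLVM struct indices (the two differ whenever `struct_llfields` inserted padding fillers). In Rust notation:

    // AAPCS64 va_list, per the abi-aa document referenced in the code above.
    #[repr(C)]
    struct VaList {
        stack: *mut u8,  // field 0: next stacked-argument slot
        gr_top: *mut u8, // field 1: top of the saved general-register area
        vr_top: *mut u8, // field 2: top of the saved FP/SIMD register area
        gr_offs: i32,    // field 3: offset from gr_top; negative while args remain in registers
        vr_offs: i32,    // field 4: offset from vr_top; negative while args remain in registers
    }

    fn main() {
        // Three pointers plus two ints: 32 bytes on a 64-bit target, with no
        // interior padding, so source and LLVM field indices coincide here.
        assert_eq!(std::mem::size_of::<VaList>(), 32);
    }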

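Finally, the big-endian adjustments in both va_arg paths (the `slot_size - layout.size.bytes()` GEPs) exist because big-endian targets store a value right-aligned within its 8- or 16-byte register save slot, so the load address must be bumped past the unused leading bytes. A self-contained sketch of that arithmetic:

    use std::convert::TryInto;

    fn main() {
        // An i32 value 0x11223344 stored right-aligned in an 8-byte big-endian slot:
        let slot: [u8; 8] = [0, 0, 0, 0, 0x11, 0x22, 0x33, 0x44];
        let (slot_size, value_size) = (8usize, 4usize);
        let offset = slot_size - value_size; // the adjustment the code above GEPs by
        let value = u32::from_be_bytes(slot[offset..].try_into().unwrap());
        assert_eq!(value, 0x1122_3344);
    }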