Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
24 files changed, 456 insertions, 148 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index d478efc863a..a6fd2a7de6b 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -398,23 +398,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } fn llvm_cconv(&self) -> llvm::CallConv { - match self.conv { - Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv, - Conv::RustCold => llvm::ColdCallConv, - Conv::AmdGpuKernel => llvm::AmdGpuKernel, - Conv::AvrInterrupt => llvm::AvrInterrupt, - Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt, - Conv::ArmAapcs => llvm::ArmAapcsCallConv, - Conv::Msp430Intr => llvm::Msp430Intr, - Conv::PtxKernel => llvm::PtxKernel, - Conv::X86Fastcall => llvm::X86FastcallCallConv, - Conv::X86Intr => llvm::X86_Intr, - Conv::X86Stdcall => llvm::X86StdcallCallConv, - Conv::X86ThisCall => llvm::X86_ThisCall, - Conv::X86VectorCall => llvm::X86_VectorCall, - Conv::X86_64SysV => llvm::X86_64_SysV, - Conv::X86_64Win64 => llvm::X86_64_Win64, - } + self.conv.into() } fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) { @@ -596,3 +580,25 @@ impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { llvm::get_param(self.llfn(), index as c_uint) } } + +impl From<Conv> for llvm::CallConv { + fn from(conv: Conv) -> Self { + match conv { + Conv::C | Conv::Rust | Conv::CCmseNonSecureCall => llvm::CCallConv, + Conv::RustCold => llvm::ColdCallConv, + Conv::AmdGpuKernel => llvm::AmdGpuKernel, + Conv::AvrInterrupt => llvm::AvrInterrupt, + Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt, + Conv::ArmAapcs => llvm::ArmAapcsCallConv, + Conv::Msp430Intr => llvm::Msp430Intr, + Conv::PtxKernel => llvm::PtxKernel, + Conv::X86Fastcall => llvm::X86FastcallCallConv, + Conv::X86Intr => llvm::X86_Intr, + Conv::X86Stdcall => llvm::X86StdcallCallConv, + Conv::X86ThisCall => llvm::X86_ThisCall, + Conv::X86VectorCall => llvm::X86_VectorCall, + Conv::X86_64SysV => llvm::X86_64_SysV, + Conv::X86_64Win64 => llvm::X86_64_Win64, + } + } +} diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 72961ae888e..fed56cdd438 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -15,7 +15,7 @@ pub(crate) unsafe fn codegen( module_llvm: &mut ModuleLlvm, module_name: &str, kind: AllocatorKind, - has_alloc_error_handler: bool, + alloc_error_handler_kind: AllocatorKind, ) { let llcx = &*module_llvm.llcx; let llmod = module_llvm.llmod(); @@ -117,8 +117,7 @@ pub(crate) unsafe fn codegen( attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]); } - let kind = if has_alloc_error_handler { AllocatorKind::Global } else { AllocatorKind::Default }; - let callee = kind.fn_name(sym::oom); + let callee = alloc_error_handler_kind.fn_name(sym::oom); let callee = llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty); // -> ! 
DIFlagNoReturn attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]); diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index e723187ff1f..219a4f8fa89 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -130,7 +130,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { op_idx.insert(idx, constraints.len()); constraints.push(reg_to_llvm(reg, Some(&value.layout))); } - InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => { + InlineAsmOperandRef::InOut { reg, late, in_value, out_place: _ } => { let value = llvm_fixup_input( self, in_value.immediate(), @@ -138,7 +138,16 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { &in_value.layout, ); inputs.push(value); - constraints.push(format!("{}", op_idx[&idx])); + + // In the case of fixed registers, we have the choice of + // either using a tied operand or duplicating the constraint. + // We prefer the latter because it matches the behavior of + // Clang. + if late && matches!(reg, InlineAsmRegOrRegClass::Reg(_)) { + constraints.push(format!("{}", reg_to_llvm(reg, Some(&in_value.layout)))); + } else { + constraints.push(format!("{}", op_idx[&idx])); + } } InlineAsmOperandRef::SymFn { instance } => { inputs.push(self.cx.get_fn(instance)); @@ -276,13 +285,13 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { let mut attrs = SmallVec::<[_; 2]>::new(); if options.contains(InlineAsmOptions::PURE) { if options.contains(InlineAsmOptions::NOMEM) { - attrs.push(llvm::AttributeKind::ReadNone.create_attr(self.cx.llcx)); + attrs.push(llvm::MemoryEffects::None.create_attr(self.cx.llcx)); } else if options.contains(InlineAsmOptions::READONLY) { - attrs.push(llvm::AttributeKind::ReadOnly.create_attr(self.cx.llcx)); + attrs.push(llvm::MemoryEffects::ReadOnly.create_attr(self.cx.llcx)); } attrs.push(llvm::AttributeKind::WillReturn.create_attr(self.cx.llcx)); } else if options.contains(InlineAsmOptions::NOMEM) { - attrs.push(llvm::AttributeKind::InaccessibleMemOnly.create_attr(self.cx.llcx)); + attrs.push(llvm::MemoryEffects::InaccessibleMemOnly.create_attr(self.cx.llcx)); } else { // LLVM doesn't have an attribute to represent ReadOnly + SideEffect } @@ -496,6 +505,44 @@ fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> { } } +/// If the register is an AArch64 integer register then return its index. 
+fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> { + match reg { + InlineAsmReg::AArch64(AArch64InlineAsmReg::x0) => Some(0), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x1) => Some(1), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x2) => Some(2), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x3) => Some(3), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x4) => Some(4), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x5) => Some(5), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x6) => Some(6), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x7) => Some(7), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x8) => Some(8), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x9) => Some(9), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x10) => Some(10), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x11) => Some(11), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x12) => Some(12), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x13) => Some(13), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x14) => Some(14), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x15) => Some(15), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x16) => Some(16), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x17) => Some(17), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x18) => Some(18), + // x19 is reserved + InlineAsmReg::AArch64(AArch64InlineAsmReg::x20) => Some(20), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x21) => Some(21), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x22) => Some(22), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x23) => Some(23), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x24) => Some(24), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x25) => Some(25), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x26) => Some(26), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x27) => Some(27), + InlineAsmReg::AArch64(AArch64InlineAsmReg::x28) => Some(28), + // x29 is reserved + InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) => Some(30), + _ => None, + } +} + /// If the register is an AArch64 vector register then return its index. fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> { match reg { @@ -526,6 +573,22 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> 'x' }; format!("{{{}mm{}}}", class, idx) + } else if let Some(idx) = a64_reg_index(reg) { + let class = if let Some(layout) = layout { + match layout.size.bytes() { + 8 => 'x', + _ => 'w', + } + } else { + // We use i32 as the type for discarded outputs + 'w' + }; + if class == 'x' && reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) { + // LLVM doesn't recognize x30. use lr instead. 
+ "{lr}".to_string() + } else { + format!("{{{}{}}}", class, idx) + } } else if let Some(idx) = a64_vreg_index(reg) { let class = if let Some(layout) = layout { match layout.size.bytes() { @@ -541,9 +604,6 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> 'q' }; format!("{{{}{}}}", class, idx) - } else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) { - // LLVM doesn't recognize x30 - "{lr}".to_string() } else if reg == InlineAsmReg::Arm(ArmInlineAsmReg::r14) { // LLVM doesn't recognize r14 "{lr}".to_string() @@ -551,6 +611,8 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> format!("{{{}}}", reg.name()) } } + // The constraints can be retrieved from + // https://llvm.org/docs/LangRef.html#supported-constraint-code-list InlineAsmRegOrRegClass::RegClass(reg) => match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r", InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w", @@ -624,6 +686,8 @@ fn modifier_to_llvm( reg: InlineAsmRegClass, modifier: Option<char>, ) -> Option<char> { + // The modifiers can be retrieved from + // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers match reg { InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier, InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index eff2436d41c..a8b47633519 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -12,8 +12,9 @@ use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtec use smallvec::SmallVec; use crate::attributes; +use crate::errors::{MissingFeatures, SanitizerMemtagRequiresMte, TargetFeatureDisableOrEnable}; use crate::llvm::AttributePlace::Function; -use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace}; +use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects}; use crate::llvm_util; pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr}; @@ -82,7 +83,7 @@ pub fn sanitize_attrs<'ll>( let mte_feature = features.iter().map(|s| &s[..]).rfind(|n| ["+mte", "-mte"].contains(&&n[..])); if let None | Some("-mte") = mte_feature { - cx.tcx.sess.err("`-Zsanitizer=memtag` requires `-Ctarget-feature=+mte`"); + cx.tcx.sess.emit_err(SanitizerMemtagRequiresMte); } attrs.push(llvm::AttributeKind::SanitizeMemTag.create_attr(cx.llcx)); @@ -303,10 +304,10 @@ pub fn from_fn_attrs<'ll, 'tcx>( to_add.push(AttributeKind::ReturnsTwice.create_attr(cx.llcx)); } if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) { - to_add.push(AttributeKind::ReadOnly.create_attr(cx.llcx)); + to_add.push(MemoryEffects::ReadOnly.create_attr(cx.llcx)); } if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) { - to_add.push(AttributeKind::ReadNone.create_attr(cx.llcx)); + to_add.push(MemoryEffects::None.create_attr(cx.llcx)); } if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { to_add.push(AttributeKind::Naked.create_attr(cx.llcx)); @@ -393,13 +394,14 @@ pub fn from_fn_attrs<'ll, 'tcx>( .get_attrs(instance.def_id(), sym::target_feature) .next() .map_or_else(|| cx.tcx.def_span(instance.def_id()), |a| a.span); - let msg = format!( - "the target features {} must all be either enabled or disabled together", - f.join(", ") - ); - let mut err = cx.tcx.sess.struct_span_err(span, &msg); - err.help("add the missing 
features in a `target_feature` attribute"); - err.emit(); + cx.tcx + .sess + .create_err(TargetFeatureDisableOrEnable { + features: f, + span: Some(span), + missing_features: Some(MissingFeatures), + }) + .emit(); return; } diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index 20a063f80fd..5c68abeb08b 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -12,6 +12,10 @@ use std::str; use object::read::macho::FatArch; use crate::common; +use crate::errors::{ + ArchiveBuildFailure, DlltoolFailImportLibrary, ErrorCallingDllTool, ErrorCreatingImportLibrary, + ErrorWritingDEFFile, UnknownArchiveKind, +}; use crate::llvm::archive_ro::{ArchiveRO, Child}; use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport}; use rustc_codegen_ssa::back::archive::{ArchiveBuilder, ArchiveBuilderBuilder}; @@ -147,7 +151,7 @@ impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> { fn build(mut self: Box<Self>, output: &Path) -> bool { match self.build_with_llvm(output) { Ok(any_members) => any_members, - Err(e) => self.sess.fatal(&format!("failed to build archive: {}", e)), + Err(e) => self.sess.emit_fatal(ArchiveBuildFailure { error: e }), } } } @@ -165,10 +169,12 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { lib_name: &str, dll_imports: &[DllImport], tmpdir: &Path, + is_direct_dependency: bool, ) -> PathBuf { + let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" }; let output_path = { let mut output_path: PathBuf = tmpdir.to_path_buf(); - output_path.push(format!("{}_imports", lib_name)); + output_path.push(format!("{}{}", lib_name, name_suffix)); output_path.with_extension("lib") }; @@ -195,7 +201,8 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { // that loaded but crashed with an AV upon calling one of the imported // functions. Therefore, use binutils to create the import library instead, // by writing a .DEF file to the temp dir and calling binutils's dlltool. 
- let def_file_path = tmpdir.join(format!("{}_imports", lib_name)).with_extension("def"); + let def_file_path = + tmpdir.join(format!("{}{}", lib_name, name_suffix)).with_extension("def"); let def_file_content = format!( "EXPORTS\n{}", @@ -214,7 +221,7 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { match std::fs::write(&def_file_path, def_file_content) { Ok(_) => {} Err(e) => { - sess.fatal(&format!("Error writing .DEF file: {}", e)); + sess.emit_fatal(ErrorWritingDEFFile { error: e }); } }; @@ -236,13 +243,14 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { match result { Err(e) => { - sess.fatal(&format!("Error calling dlltool: {}", e)); + sess.emit_fatal(ErrorCallingDllTool { error: e }); + } + Ok(output) if !output.status.success() => { + sess.emit_fatal(DlltoolFailImportLibrary { + stdout: String::from_utf8_lossy(&output.stdout), + stderr: String::from_utf8_lossy(&output.stderr), + }) } - Ok(output) if !output.status.success() => sess.fatal(&format!( - "Dlltool could not create import library: {}\n{}", - String::from_utf8_lossy(&output.stdout), - String::from_utf8_lossy(&output.stderr) - )), _ => {} } } else { @@ -290,11 +298,10 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { }; if result == crate::llvm::LLVMRustResult::Failure { - sess.fatal(&format!( - "Error creating import library for {}: {}", + sess.emit_fatal(ErrorCreatingImportLibrary { lib_name, - llvm::last_error().unwrap_or("unknown LLVM error".to_string()) - )); + error: llvm::last_error().unwrap_or("unknown LLVM error".to_string()), + }); } }; @@ -305,9 +312,10 @@ impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { impl<'a> LlvmArchiveBuilder<'a> { fn build_with_llvm(&mut self, output: &Path) -> io::Result<bool> { let kind = &*self.sess.target.archive_format; - let kind = kind.parse::<ArchiveKind>().map_err(|_| kind).unwrap_or_else(|kind| { - self.sess.fatal(&format!("Don't know how to build archive of type: {}", kind)) - }); + let kind = kind + .parse::<ArchiveKind>() + .map_err(|_| kind) + .unwrap_or_else(|kind| self.sess.emit_fatal(UnknownArchiveKind { kind })); let mut additions = mem::take(&mut self.additions); let mut strings = Vec::new(); diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 2049422b79a..3fa21355b7f 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -1,4 +1,5 @@ use crate::back::write::{self, save_temp_bitcode, DiagnosticHandlers}; +use crate::errors::DynamicLinkingWithLTO; use crate::llvm::{self, build_string}; use crate::{LlvmCodegenBackend, ModuleLlvm}; use object::read::archive::ArchiveFile; @@ -32,8 +33,8 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; pub fn crate_type_allows_lto(crate_type: CrateType) -> bool { match crate_type { - CrateType::Executable | CrateType::Staticlib | CrateType::Cdylib => true, - CrateType::Dylib | CrateType::Rlib | CrateType::ProcMacro => false, + CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true, + CrateType::Rlib | CrateType::ProcMacro => false, } } @@ -73,17 +74,6 @@ fn prepare_lto( // with either fat or thin LTO let mut upstream_modules = Vec::new(); if cgcx.lto != Lto::ThinLocal { - if cgcx.opts.cg.prefer_dynamic { - diag_handler - .struct_err("cannot prefer dynamic linking when performing LTO") - .note( - "only 'staticlib', 'bin', and 'cdylib' outputs are \ - supported with LTO", - ) - .emit(); - return Err(FatalError); - } - 
// Make sure we actually can run LTO for crate_type in cgcx.crate_types.iter() { if !crate_type_allows_lto(*crate_type) { @@ -92,9 +82,19 @@ fn prepare_lto( static library outputs", ); return Err(e); + } else if *crate_type == CrateType::Dylib { + if !cgcx.opts.unstable_opts.dylib_lto { + return Err(diag_handler + .fatal("lto cannot be used for `dylib` crate type without `-Zdylib-lto`")); + } } } + if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto { + diag_handler.emit_err(DynamicLinkingWithLTO); + return Err(FatalError); + } + for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO"); @@ -573,7 +573,7 @@ pub(crate) fn run_pass_manager( module: &mut ModuleCodegen<ModuleLlvm>, thin: bool, ) -> Result<(), FatalError> { - let _timer = cgcx.prof.extra_verbose_generic_activity("LLVM_lto_optimize", &*module.name); + let _timer = cgcx.prof.verbose_generic_activity_with_arg("LLVM_lto_optimize", &*module.name); let config = cgcx.config(module.kind); // Now we have one massive module inside of llmod. Time to run the diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index db526746fa7..97d0de47b3a 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -537,7 +537,7 @@ pub(crate) fn link( mut modules: Vec<ModuleCodegen<ModuleLlvm>>, ) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { use super::lto::{Linker, ModuleBuffer}; - // Sort the modules by name to ensure to ensure deterministic behavior. + // Sort the modules by name to ensure deterministic behavior. modules.sort_by(|a, b| a.name.cmp(&b.name)); let (first, elements) = modules.split_first().expect("Bug! modules must contain at least one module."); @@ -765,11 +765,21 @@ pub(crate) unsafe fn codegen( drop(handlers); } + // `.dwo` files are only emitted if: + // + // - Object files are being emitted (i.e. bitcode only or metadata only compilations will not + // produce dwarf objects, even if otherwise enabled) + // - Target supports Split DWARF + // - Split debuginfo is enabled + // - Split DWARF kind is `split` (i.e. debuginfo is split into `.dwo` files, not different + // sections in the `.o` files). + let dwarf_object_emitted = matches!(config.emit_obj, EmitObj::ObjectCode(_)) + && cgcx.target_can_use_split_dwarf + && cgcx.split_debuginfo != SplitDebuginfo::Off + && cgcx.split_dwarf_kind == SplitDwarfKind::Split; Ok(module.into_compiled_module( config.emit_obj != EmitObj::None, - cgcx.target_can_use_split_dwarf - && cgcx.split_debuginfo != SplitDebuginfo::Off - && cgcx.split_dwarf_kind == SplitDwarfKind::Split, + dwarf_object_emitted, config.emit_bc, &cgcx.output_filenames, )) diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index fca43a0d86d..77dd15ef4d8 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -365,11 +365,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { Int(I64) => "llvm.ssub.with.overflow.i64", Int(I128) => "llvm.ssub.with.overflow.i128", - Uint(U8) => "llvm.usub.with.overflow.i8", - Uint(U16) => "llvm.usub.with.overflow.i16", - Uint(U32) => "llvm.usub.with.overflow.i32", - Uint(U64) => "llvm.usub.with.overflow.i64", - Uint(U128) => "llvm.usub.with.overflow.i128", + Uint(_) => { + // Emit sub and icmp instead of llvm.usub.with.overflow. 
LLVM considers these + // to be the canonical form. It will attempt to reform llvm.usub.with.overflow + // in the backend if profitable. + let sub = self.sub(lhs, rhs); + let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs); + return (sub, cmp); + } _ => unreachable!(), }, @@ -553,15 +556,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn write_operand_repeatedly( - mut self, + &mut self, cg_elem: OperandRef<'tcx, &'ll Value>, count: u64, dest: PlaceRef<'tcx, &'ll Value>, - ) -> Self { + ) { let zero = self.const_usize(0); let count = self.const_usize(count); - let start = dest.project_index(&mut self, zero).llval; - let end = dest.project_index(&mut self, count).llval; + let start = dest.project_index(self, zero).llval; + let end = dest.project_index(self, count).llval; let header_bb = self.append_sibling_block("repeat_loop_header"); let body_bb = self.append_sibling_block("repeat_loop_body"); @@ -589,7 +592,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { body_bx.br(header_bb); header_bx.add_incoming_to_phi(current, next, body_bb); - Self::build(self.cx, next_bb) + *self = Self::build(self.cx, next_bb); } fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) { diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index b83c1e8f08f..6f0d1b7ce84 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -179,7 +179,8 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> // MinGW: For backward compatibility we rely on the linker to decide whether it // should use dllimport for functions. if cx.use_dll_storage_attrs - && tcx.is_dllimport_foreign_item(instance_def_id) + && let Some(library) = tcx.native_library(instance_def_id) + && library.kind.is_dllimport() && !matches!(tcx.sess.target.env.as_ref(), "gnu" | "uclibc") { llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport); diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index ee2fc65e37b..69434280b21 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -1,6 +1,7 @@ use crate::base; use crate::common::{self, CodegenCx}; use crate::debuginfo; +use crate::errors::{InvalidMinimumAlignment, LinkageConstOrMutType, SymbolAlreadyDefined}; use crate::llvm::{self, True}; use crate::llvm_util; use crate::type_::Type; @@ -19,6 +20,7 @@ use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::{bug, span_bug}; +use rustc_session::config::Lto; use rustc_target::abi::{ AddressSpace, Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange, }; @@ -26,7 +28,7 @@ use std::ops::Range; pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value { let alloc = alloc.inner(); - let mut llvals = Vec::with_capacity(alloc.provenance().len() + 1); + let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1); let dl = cx.data_layout(); let pointer_size = dl.pointer_size.bytes() as usize; @@ -38,9 +40,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< alloc: &'a Allocation, range: Range<usize>, ) { - let chunks = alloc - .init_mask() - .range_as_init_chunks(Size::from_bytes(range.start), Size::from_bytes(range.end)); + let chunks = 
alloc.init_mask().range_as_init_chunks(range.clone().into()); let chunk_to_llval = move |chunk| match chunk { InitChunk::Init(range) => { @@ -78,7 +78,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< } let mut next_offset = 0; - for &(offset, alloc_id) in alloc.provenance().iter() { + for &(offset, alloc_id) in alloc.provenance().ptrs().iter() { let offset = offset.bytes(); assert_eq!(offset as usize as u64, offset); let offset = offset as usize; @@ -145,7 +145,7 @@ fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: match Align::from_bits(min) { Ok(min) => align = align.max(min), Err(err) => { - cx.sess().err(&format!("invalid minimum global alignment: {}", err)); + cx.sess().emit_err(InvalidMinimumAlignment { err }); } } } @@ -173,10 +173,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( let llty2 = if let ty::RawPtr(ref mt) = ty.kind() { cx.layout_of(mt.ty).llvm_type(cx) } else { - cx.sess().span_fatal( - cx.tcx.def_span(def_id), - "must have type `*const T` or `*mut T` due to `#[linkage]` attribute", - ) + cx.sess().emit_fatal(LinkageConstOrMutType { span: cx.tcx.def_span(def_id) }) }; unsafe { // Declare a symbol `foo` with the desired linkage. @@ -192,10 +189,10 @@ fn check_and_apply_linkage<'ll, 'tcx>( let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(sym); let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| { - cx.sess().span_fatal( - cx.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", &sym), - ) + cx.sess().emit_fatal(SymbolAlreadyDefined { + span: cx.tcx.def_span(def_id), + symbol_name: sym, + }) }); llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage); llvm::LLVMSetInitializer(g2, g1); @@ -303,7 +300,8 @@ impl<'ll> CodegenCx<'ll, '_> { // ThinLTO can't handle this workaround in all cases, so we don't // emit the attrs. Instead we make them unnecessary by disallowing // dynamic linking when linker plugin based LTO is enabled. - !self.tcx.sess.opts.cg.linker_plugin_lto.enabled(); + !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() && + self.tcx.sess.lto() != Lto::Thin; // If this assertion triggers, there's something wrong with commandline // argument validation. @@ -332,7 +330,10 @@ impl<'ll> CodegenCx<'ll, '_> { } } - if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { + if self.use_dll_storage_attrs + && let Some(library) = self.tcx.native_library(def_id) + && library.kind.is_dllimport() + { // For foreign (native) libs we know the exact storage type to use. unsafe { llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); @@ -486,7 +487,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { // happens to be zero. Instead, we should only check the value of defined bytes // and set all undefined bytes to zero if this allocation is headed for the // BSS. 
- let all_bytes_are_zero = alloc.provenance().is_empty() + let all_bytes_are_zero = alloc.provenance().ptrs().is_empty() && alloc .inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()) .iter() @@ -510,7 +511,7 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { section.as_str().as_ptr().cast(), section.as_str().len() as c_uint, ); - assert!(alloc.provenance().is_empty()); + assert!(alloc.provenance().ptrs().is_empty()); // The `inspect` method is okay here because we checked for provenance, and // because we are doing this access to inspect the final interpreter state (not diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 79ddfd884df..4dcc7cd5447 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -3,6 +3,7 @@ use crate::back::write::to_llvm_code_model; use crate::callee::get_fn; use crate::coverageinfo; use crate::debuginfo; +use crate::errors::BranchProtectionRequiresAArch64; use crate::llvm; use crate::llvm_util; use crate::type_::Type; @@ -26,6 +27,7 @@ use rustc_session::config::{BranchProtection, CFGuard, CFProtection}; use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet}; use rustc_session::Session; use rustc_span::source_map::Span; +use rustc_span::source_map::Spanned; use rustc_target::abi::{ call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx, }; @@ -158,6 +160,10 @@ pub unsafe fn create_module<'ll>( if sess.target.arch == "s390x" { target_data_layout = target_data_layout.replace("-v128:64", ""); } + + if sess.target.arch == "riscv64" { + target_data_layout = target_data_layout.replace("-n32:64-", "-n64-"); + } } // Ensure the data-layout values hardcoded remain the defaults. @@ -271,7 +277,7 @@ pub unsafe fn create_module<'ll>( if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection { if sess.target.arch != "aarch64" { - sess.err("-Zbranch-protection is only supported on aarch64"); + sess.emit_err(BranchProtectionRequiresAArch64); } else { llvm::LLVMRustAddModuleFlag( llmod, @@ -570,8 +576,14 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn declare_c_main(&self, fn_type: Self::Type) -> Option<Self::Function> { - if self.get_declared_value("main").is_none() { - Some(self.declare_cfn("main", llvm::UnnamedAddr::Global, fn_type)) + let entry_name = self.sess().target.entry_name.as_ref(); + if self.get_declared_value(entry_name).is_none() { + Some(self.declare_entry_fn( + entry_name, + self.sess().target.entry_abi.into(), + llvm::UnnamedAddr::Global, + fn_type, + )) } else { // If the symbol already exists, it is an error: for example, the user wrote // #[no_mangle] extern "C" fn main(..) {..} @@ -947,7 +959,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { if let LayoutError::SizeOverflow(_) = err { - self.sess().span_fatal(span, &err.to_string()) + self.sess().emit_fatal(Spanned { span, node: err }) } else { span_bug!(span, "failed to get layout for `{}`: {}", ty, err) } @@ -965,7 +977,7 @@ impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> { fn_abi_request: FnAbiRequest<'tcx>, ) -> ! 
{ if let FnAbiError::Layout(LayoutError::SizeOverflow(_)) = err { - self.sess().span_fatal(span, &err.to_string()) + self.sess().emit_fatal(Spanned { span, node: err }) } else { match fn_abi_request { FnAbiRequest::OfFnPtr { sig, extra_args } => { diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index 0d1df6fb1ac..8a8d889a298 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -1,5 +1,6 @@ use crate::common::CodegenCx; use crate::coverageinfo; +use crate::errors::InstrumentCoverageRequiresLLVM12; use crate::llvm; use llvm::coverageinfo::CounterMappingRegion; @@ -37,7 +38,7 @@ pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) { // LLVM 12. let version = coverageinfo::mapping_version(); if version < 4 { - tcx.sess.fatal("rustc option `-C instrument-coverage` requires LLVM 12 or higher."); + tcx.sess.emit_fatal(InstrumentCoverageRequiresLLVM12); } debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name()); @@ -129,7 +130,7 @@ impl CoverageMapGenerator { // LLVM Coverage Mapping Format version 6 (zero-based encoded as 5) // requires setting the first filename to the compilation directory. // Since rustc generates coverage maps with relative paths, the - // compilation directory can be combined with the the relative paths + // compilation directory can be combined with the relative paths // to get absolute paths, if needed. let working_dir = tcx .sess diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index b23fe3fc9d5..ca7a07d8391 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -39,6 +39,7 @@ use smallvec::SmallVec; use std::cell::OnceCell; use std::cell::RefCell; use std::iter; +use std::ops::Range; mod create_scope_map; pub mod gdb; @@ -163,12 +164,14 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> { variable_alloca: Self::Value, direct_offset: Size, indirect_offsets: &[Size], + fragment: Option<Range<Size>>, ) { - // Convert the direct and indirect offsets to address ops. + // Convert the direct and indirect offsets and fragment byte range to address ops. // FIXME(eddyb) use `const`s instead of getting the values via FFI, // the values should match the ones in the DWARF standard anyway. let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() }; let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() }; + let op_llvm_fragment = || unsafe { llvm::LLVMRustDIBuilderCreateOpLLVMFragment() }; let mut addr_ops = SmallVec::<[u64; 8]>::new(); if direct_offset.bytes() > 0 { @@ -182,6 +185,13 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> { addr_ops.push(offset.bytes() as u64); } } + if let Some(fragment) = fragment { + // `DW_OP_LLVM_fragment` takes as arguments the fragment's + // offset and size, both of them in bits. + addr_ops.push(op_llvm_fragment()); + addr_ops.push(fragment.start.bits() as u64); + addr_ops.push((fragment.end - fragment.start).bits() as u64); + } unsafe { // FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`. 
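Aside on the `DW_OP_LLVM_fragment` change in the debuginfo/mod.rs hunk above: the new `fragment` parameter appends a fragment operator whose two operands are the fragment's offset and size, both in bits. The sketch below is not the compiler's FFI-based builder; the opcode constants and the `build_addr_ops` helper are illustrative assumptions that only mirror the order in which the hunk pushes address ops.

// Sketch only: placeholder opcode values; rustc obtains the real ones via
// LLVMRustDIBuilderCreateOp* over FFI. Offsets are byte counts; the fragment
// operator takes its offset and size in bits.
const OP_DEREF: u64 = 0x06;            // assumed stand-in for DW_OP_deref
const OP_PLUS_UCONST: u64 = 0x23;      // assumed stand-in for DW_OP_plus_uconst
const OP_LLVM_FRAGMENT: u64 = 0x1000;  // assumed stand-in for DW_OP_LLVM_fragment

fn build_addr_ops(
    direct_offset_bytes: u64,
    indirect_offsets_bytes: &[u64],
    fragment_bits: Option<std::ops::Range<u64>>, // start..end in bits
) -> Vec<u64> {
    let mut ops = Vec::new();
    if direct_offset_bytes > 0 {
        ops.push(OP_PLUS_UCONST);
        ops.push(direct_offset_bytes);
    }
    for &off in indirect_offsets_bytes {
        ops.push(OP_DEREF);
        if off > 0 {
            ops.push(OP_PLUS_UCONST);
            ops.push(off);
        }
    }
    if let Some(frag) = fragment_bits {
        // Fragment offset and size, both in bits, as in the hunk above.
        ops.push(OP_LLVM_FRAGMENT);
        ops.push(frag.start);
        ops.push(frag.end - frag.start);
    }
    ops
}

fn main() {
    // e.g. a location at +8 bytes, described as bits 0..32 of the source variable
    println!("{:?}", build_addr_ops(8, &[], Some(0..32)));
}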
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs index a40cfc8b23f..5cd0e1cb63a 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs @@ -72,7 +72,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>( layout.is_unsized() ); - if !layout.is_unsized() { + if layout.is_sized() { return None; } diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index f79ef11720d..dc21a02cec4 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -90,6 +90,28 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { declare_raw_fn(self, name, llvm::CCallConv, unnamed, visibility, fn_type) } + /// Declare an entry Function + /// + /// The ABI of this function can change depending on the target (although for now the same as + /// `declare_cfn`) + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + pub fn declare_entry_fn( + &self, + name: &str, + callconv: llvm::CallConv, + unnamed: llvm::UnnamedAddr, + fn_type: &'ll Type, + ) -> &'ll Value { + let visibility = if self.tcx.sess.target.default_hidden_visibility { + llvm::Visibility::Hidden + } else { + llvm::Visibility::Default + }; + declare_raw_fn(self, name, callconv, unnamed, visibility, fn_type) + } + /// Declare a Rust function. /// /// If there’s a value with the same name already declared, the function will diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs new file mode 100644 index 00000000000..0fafc214f2f --- /dev/null +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -0,0 +1,139 @@ +use std::borrow::Cow; + +use rustc_errors::fluent; +use rustc_errors::DiagnosticBuilder; +use rustc_errors::ErrorGuaranteed; +use rustc_errors::Handler; +use rustc_errors::IntoDiagnostic; +use rustc_macros::{Diagnostic, Subdiagnostic}; +use rustc_span::Span; + +#[derive(Diagnostic)] +#[diag(codegen_llvm_unknown_ctarget_feature_prefix)] +#[note] +pub(crate) struct UnknownCTargetFeaturePrefix<'a> { + pub feature: &'a str, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_unknown_ctarget_feature)] +#[note] +pub(crate) struct UnknownCTargetFeature<'a> { + pub feature: &'a str, + #[subdiagnostic] + pub rust_feature: PossibleFeature<'a>, +} + +#[derive(Subdiagnostic)] +pub(crate) enum PossibleFeature<'a> { + #[help(possible_feature)] + Some { rust_feature: &'a str }, + #[help(consider_filing_feature_request)] + None, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_error_creating_import_library)] +pub(crate) struct ErrorCreatingImportLibrary<'a> { + pub lib_name: &'a str, + pub error: String, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_instrument_coverage_requires_llvm_12)] +pub(crate) struct InstrumentCoverageRequiresLLVM12; + +#[derive(Diagnostic)] +#[diag(codegen_llvm_symbol_already_defined)] +pub(crate) struct SymbolAlreadyDefined<'a> { + #[primary_span] + pub span: Span, + pub symbol_name: &'a str, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_branch_protection_requires_aarch64)] +pub(crate) struct BranchProtectionRequiresAArch64; + +#[derive(Diagnostic)] +#[diag(codegen_llvm_invalid_minimum_alignment)] +pub(crate) struct InvalidMinimumAlignment { + pub err: String, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_linkage_const_or_mut_type)] +pub(crate) struct LinkageConstOrMutType { + #[primary_span] + pub 
span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_sanitizer_memtag_requires_mte)] +pub(crate) struct SanitizerMemtagRequiresMte; + +#[derive(Diagnostic)] +#[diag(codegen_llvm_archive_build_failure)] +pub(crate) struct ArchiveBuildFailure { + pub error: std::io::Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_error_writing_def_file)] +pub(crate) struct ErrorWritingDEFFile { + pub error: std::io::Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_error_calling_dlltool)] +pub(crate) struct ErrorCallingDllTool { + pub error: std::io::Error, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_dlltool_fail_import_library)] +pub(crate) struct DlltoolFailImportLibrary<'a> { + pub stdout: Cow<'a, str>, + pub stderr: Cow<'a, str>, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_unknown_archive_kind)] +pub(crate) struct UnknownArchiveKind<'a> { + pub kind: &'a str, +} + +#[derive(Diagnostic)] +#[diag(codegen_llvm_dynamic_linking_with_lto)] +#[note] +pub(crate) struct DynamicLinkingWithLTO; + +#[derive(Diagnostic)] +#[diag(codegen_llvm_fail_parsing_target_machine_config_to_target_machine)] +pub(crate) struct FailParsingTargetMachineConfigToTargetMachine { + pub error: String, +} + +pub(crate) struct TargetFeatureDisableOrEnable<'a> { + pub features: &'a [&'a str], + pub span: Option<Span>, + pub missing_features: Option<MissingFeatures>, +} + +#[derive(Subdiagnostic)] +#[help(codegen_llvm_missing_features)] +pub(crate) struct MissingFeatures; + +impl IntoDiagnostic<'_, ErrorGuaranteed> for TargetFeatureDisableOrEnable<'_> { + fn into_diagnostic(self, sess: &'_ Handler) -> DiagnosticBuilder<'_, ErrorGuaranteed> { + let mut diag = sess.struct_err(fluent::codegen_llvm_target_feature_disable_or_enable); + if let Some(span) = self.span { + diag.set_span(span); + }; + if let Some(missing_features) = self.missing_features { + diag.subdiagnostic(missing_features); + } + diag.set_arg("features", self.features.join(", ")); + diag + } +} diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 825011941a2..cf590a43826 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -340,17 +340,26 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { sym::black_box => { args[0].val.store(self, result); - + let result_val_span = [result.llval]; // We need to "use" the argument in some way LLVM can't introspect, and on // targets that support it we can typically leverage inline assembly to do // this. LLVM's interpretation of inline assembly is that it's, well, a black // box. This isn't the greatest implementation since it probably deoptimizes // more than we want, but it's so far good enough. + // + // For zero-sized types, the location pointed to by the result may be + // uninitialized. Do not "use" the result in this case; instead just clobber + // the memory. 
+ let (constraint, inputs): (&str, &[_]) = if result.layout.is_zst() { + ("~{memory}", &[]) + } else { + ("r,~{memory}", &result_val_span) + }; crate::asm::inline_asm_call( self, "", - "r,~{memory}", - &[result.llval], + constraint, + inputs, self.type_void(), true, false, diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 89c7e51d09e..246e82545c8 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -12,6 +12,8 @@ #![feature(iter_intersperse)] #![recursion_limit = "256"] #![allow(rustc::potential_query_instability)] +#![deny(rustc::untranslatable_diagnostic)] +#![deny(rustc::diagnostic_outside_of_impl)] #[macro_use] extern crate rustc_macros; @@ -20,6 +22,7 @@ extern crate tracing; use back::write::{create_informational_target_machine, create_target_machine}; +use errors::FailParsingTargetMachineConfigToTargetMachine; pub use llvm_util::target_features; use rustc_ast::expand::allocator::AllocatorKind; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; @@ -62,6 +65,7 @@ mod context; mod coverageinfo; mod debuginfo; mod declare; +mod errors; mod intrinsic; // The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912. @@ -108,11 +112,11 @@ impl ExtraBackendMethods for LlvmCodegenBackend { tcx: TyCtxt<'tcx>, module_name: &str, kind: AllocatorKind, - has_alloc_error_handler: bool, + alloc_error_handler_kind: AllocatorKind, ) -> ModuleLlvm { let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name); unsafe { - allocator::codegen(tcx, &mut module_llvm, module_name, kind, has_alloc_error_handler); + allocator::codegen(tcx, &mut module_llvm, module_name, kind, alloc_error_handler_kind); } module_llvm } @@ -412,7 +416,7 @@ impl ModuleLlvm { let tm = match (cgcx.tm_factory)(tm_factory_config) { Ok(m) => m, Err(e) => { - handler.struct_err(&e).emit(); + handler.emit_err(FailParsingTargetMachineConfigToTargetMachine { error: e }); return Err(FatalError); } }; diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 3b8a9f3f77f..f4519849730 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -183,7 +183,6 @@ pub enum AttributeKind { OptimizeNone = 24, ReturnsTwice = 25, ReadNone = 26, - InaccessibleMemOnly = 27, SanitizeHWAddress = 28, WillReturn = 29, StackProtectReq = 30, @@ -590,6 +589,15 @@ pub enum ChecksumKind { SHA256, } +/// LLVMRustMemoryEffects +#[derive(Copy, Clone)] +#[repr(C)] +pub enum MemoryEffects { + None, + ReadOnly, + InaccessibleMemOnly, +} + extern "C" { type Opaque; } @@ -1175,6 +1183,7 @@ extern "C" { pub fn LLVMRustCreateUWTableAttr(C: &Context, async_: bool) -> &Attribute; pub fn LLVMRustCreateAllocSizeAttr(C: &Context, size_arg: u32) -> &Attribute; pub fn LLVMRustCreateAllocKindAttr(C: &Context, size_arg: u64) -> &Attribute; + pub fn LLVMRustCreateMemoryEffectsAttr(C: &Context, effects: MemoryEffects) -> &Attribute; // Operations on functions pub fn LLVMRustGetOrInsertFunction<'a>( @@ -2202,6 +2211,7 @@ extern "C" { ) -> &'a DILocation; pub fn LLVMRustDIBuilderCreateOpDeref() -> u64; pub fn LLVMRustDIBuilderCreateOpPlusUconst() -> u64; + pub fn LLVMRustDIBuilderCreateOpLLVMFragment() -> u64; #[allow(improper_ctypes)] pub fn LLVMRustWriteTypeToString(Type: &Type, s: &RustString); diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 
6602a4ab863..f820e752371 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -185,6 +185,13 @@ impl AttributeKind { } } +impl MemoryEffects { + /// Create an LLVM Attribute with these memory effects. + pub fn create_attr(self, llcx: &Context) -> &Attribute { + unsafe { LLVMRustCreateMemoryEffectsAttr(llcx, self) } + } +} + pub fn set_section(llglobal: &Value, section_name: &str) { let section_name_cstr = CString::new(section_name).expect("unexpected CString error"); unsafe { diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 2fd58567c48..4af1aaec0a1 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -1,4 +1,8 @@ use crate::back::write::create_informational_target_machine; +use crate::errors::{ + PossibleFeature, TargetFeatureDisableOrEnable, UnknownCTargetFeature, + UnknownCTargetFeaturePrefix, +}; use crate::llvm; use libc::c_int; use rustc_codegen_ssa::target_features::{ @@ -159,6 +163,9 @@ pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> SmallVec<[&'a str; 2] ("x86", "rdrand") => smallvec!["rdrnd"], ("x86", "bmi1") => smallvec!["bmi"], ("x86", "cmpxchg16b") => smallvec!["cx16"], + // FIXME: These aliases are misleading, and should be removed before avx512_target_feature is + // stabilized. They must remain until std::arch switches off them. + // rust#100752 ("x86", "avx512vaes") => smallvec!["vaes"], ("x86", "avx512gfni") => smallvec!["gfni"], ("x86", "avx512vpclmulqdq") => smallvec!["vpclmulqdq"], @@ -434,12 +441,7 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str Some(c @ '+' | c @ '-') => c, Some(_) => { if diagnostics { - let mut diag = sess.struct_warn(&format!( - "unknown feature specified for `-Ctarget-feature`: `{}`", - s - )); - diag.note("features must begin with a `+` to enable or `-` to disable it"); - diag.emit(); + sess.emit_warning(UnknownCTargetFeaturePrefix { feature: s }); } return None; } @@ -456,17 +458,15 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str None } }); - let mut diag = sess.struct_warn(&format!( - "unknown feature specified for `-Ctarget-feature`: `{}`", - feature - )); - diag.note("it is still passed through to the codegen backend"); - if let Some(rust_feature) = rust_feature { - diag.help(&format!("you might have meant: `{}`", rust_feature)); + let unknown_feature = if let Some(rust_feature) = rust_feature { + UnknownCTargetFeature { + feature, + rust_feature: PossibleFeature::Some { rust_feature }, + } } else { - diag.note("consider filing a feature request"); - } - diag.emit(); + UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None } + }; + sess.emit_warning(unknown_feature); } if diagnostics { @@ -492,10 +492,11 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str features.extend(feats); if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) { - sess.err(&format!( - "target features {} must all be enabled or disabled together", - f.join(", ") - )); + sess.emit_err(TargetFeatureDisableOrEnable { + features: f, + span: None, + missing_features: None, + }); } features diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs index 1eceb7f5c87..76f692b2016 100644 --- a/compiler/rustc_codegen_llvm/src/mono_item.rs +++ b/compiler/rustc_codegen_llvm/src/mono_item.rs @@ -1,6 +1,7 @@ use 
crate::attributes; use crate::base; use crate::context::CodegenCx; +use crate::errors::SymbolAlreadyDefined; use crate::llvm; use crate::type_of::LayoutLlvmExt; use rustc_codegen_ssa::traits::*; @@ -25,10 +26,8 @@ impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> { let llty = self.layout_of(ty).llvm_type(self); let g = self.define_global(symbol_name, llty).unwrap_or_else(|| { - self.sess().span_fatal( - self.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", symbol_name), - ) + self.sess() + .emit_fatal(SymbolAlreadyDefined { span: self.tcx.def_span(def_id), symbol_name }) }); unsafe { diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index eeb38d4ecf5..5eec7dc6130 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -127,10 +127,6 @@ impl<'ll> CodegenCx<'ll, '_> { pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } - - pub(crate) fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { - unsafe { llvm::LLVMRustArrayType(ty, len) } - } } impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -231,6 +227,10 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn val_ty(&self, v: &'ll Value) -> &'ll Type { common::val_ty(v) } + + fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { + unsafe { llvm::LLVMRustArrayType(ty, len) } + } } impl Type { diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index dc1165835e7..182adf81785 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -140,7 +140,7 @@ fn struct_llfields<'a, 'tcx>( prev_effective_align = effective_field_align; } let padding_used = result.len() > field_count; - if !layout.is_unsized() && field_count > 0 { + if layout.is_sized() && field_count > 0 { if offset > layout.size { bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset); } |
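A note on the builder.rs hunk that replaces `llvm.usub.with.overflow` with a plain `sub` plus `icmp ult`: for unsigned integers the borrow of `lhs - rhs` is exactly `lhs < rhs`, which is why the two instructions together are equivalent to the intrinsic. A minimal, self-contained check of that identity in plain Rust (not codegen code):

fn usub_with_overflow(lhs: u32, rhs: u32) -> (u32, bool) {
    // wrapping sub + unsigned compare, mirroring the new sub/icmp lowering
    (lhs.wrapping_sub(rhs), lhs < rhs)
}

fn main() {
    for &(a, b) in &[(5u32, 3), (3, 5), (0, 0), (0, 1), (u32::MAX, 1)] {
        assert_eq!(usub_with_overflow(a, b), a.overflowing_sub(b));
    }
    println!("sub + (lhs < rhs) matches overflowing_sub for all samples");
}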
