diff options
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
39 files changed, 1770 insertions, 1807 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 009e7e2487b..ac7583f5666 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -24,6 +24,7 @@ use crate::attributes::{self, llfn_attrs_from_instance}; use crate::builder::Builder; use crate::context::CodegenCx; use crate::llvm::{self, Attribute, AttributePlace}; +use crate::llvm_util; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; @@ -41,12 +42,13 @@ trait ArgAttributesExt { const ABI_AFFECTING_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 1] = [(ArgAttribute::InReg, llvm::AttributeKind::InReg)]; -const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 5] = [ +const OPTIMIZATION_ATTRIBUTES: [(ArgAttribute, llvm::AttributeKind); 6] = [ (ArgAttribute::NoAlias, llvm::AttributeKind::NoAlias), - (ArgAttribute::NoCapture, llvm::AttributeKind::NoCapture), + (ArgAttribute::CapturesAddress, llvm::AttributeKind::CapturesAddress), (ArgAttribute::NonNull, llvm::AttributeKind::NonNull), (ArgAttribute::ReadOnly, llvm::AttributeKind::ReadOnly), (ArgAttribute::NoUndef, llvm::AttributeKind::NoUndef), + (ArgAttribute::CapturesReadOnly, llvm::AttributeKind::CapturesReadOnly), ]; fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&'ll Attribute; 8]> { @@ -82,6 +84,12 @@ fn get_attrs<'ll>(this: &ArgAttributes, cx: &CodegenCx<'ll, '_>) -> SmallVec<[&' } for (attr, llattr) in OPTIMIZATION_ATTRIBUTES { if regular.contains(attr) { + // captures(...) is only available since LLVM 21. + if (attr == ArgAttribute::CapturesReadOnly || attr == ArgAttribute::CapturesAddress) + && llvm_util::get_version() < (21, 0, 0) + { + continue; + } attrs.push(llattr.create_attr(cx.llcx)); } } @@ -500,7 +508,16 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } } PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => { - apply(attrs); + let i = apply(attrs); + if cx.sess().opts.optimize != config::OptLevel::No + && llvm_util::get_version() >= (21, 0, 0) + { + attributes::apply_to_llfn( + llfn, + llvm::AttributePlace::Argument(i), + &[llvm::AttributeKind::DeadOnReturn.create_attr(cx.llcx)], + ); + } } PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => { assert!(!on_stack); diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 2b5090ed6db..df3e49279d9 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -8,11 +8,12 @@ use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::{DebugInfo, OomStrategy}; use rustc_symbol_mangling::mangle_internal_symbol; +use smallvec::SmallVec; use crate::builder::SBuilder; use crate::declare::declare_simple_fn; -use crate::llvm::{self, False, True, Type, Value}; -use crate::{SimpleCx, attributes, debuginfo}; +use crate::llvm::{self, FALSE, TRUE, Type, Value}; +use crate::{SimpleCx, attributes, debuginfo, llvm_util}; pub(crate) unsafe fn codegen( tcx: TyCtxt<'_>, @@ -79,7 +80,7 @@ pub(crate) unsafe fn codegen( &cx, &mangle_internal_symbol(tcx, OomStrategy::SYMBOL), &i8, - &llvm::LLVMConstInt(i8, tcx.sess.opts.unstable_opts.oom.should_panic() as u64, False), + &llvm::LLVMConstInt(i8, tcx.sess.opts.unstable_opts.oom.should_panic() as u64, FALSE), ); // __rust_no_alloc_shim_is_unstable_v2 @@ -147,6 +148,20 @@ fn create_wrapper_function( llvm::Visibility::from_generic(tcx.sess.default_visibility()), 
ty, ); + + let mut attrs = SmallVec::<[_; 2]>::new(); + + let target_cpu = llvm_util::target_cpu(tcx.sess); + let target_cpu_attr = llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu); + + let tune_cpu_attr = llvm_util::tune_cpu(tcx.sess) + .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu)); + + attrs.push(target_cpu_attr); + attrs.extend(tune_cpu_attr); + + attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &attrs); + let no_return = if no_return { // -> ! DIFlagNoReturn let no_return = llvm::AttributeKind::NoReturn.create_attr(cx.llcx); @@ -186,7 +201,7 @@ fn create_wrapper_function( .map(|(i, _)| llvm::get_param(llfn, i as c_uint)) .collect::<Vec<_>>(); let ret = bx.call(ty, callee, &args, None); - llvm::LLVMSetTailCall(ret, True); + llvm::LLVMSetTailCall(ret, TRUE); if output.is_some() { bx.ret(ret); } else { diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 9ddadcf16aa..38c1d3b53e8 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -16,6 +16,7 @@ use tracing::debug; use crate::builder::Builder; use crate::common::Funclet; use crate::context::CodegenCx; +use crate::llvm::ToLlvmBool; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; @@ -384,15 +385,19 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { ) { let asm_arch = self.tcx.sess.asm_arch.unwrap(); - // Default to Intel syntax on x86 - let intel_syntax = matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) - && !options.contains(InlineAsmOptions::ATT_SYNTAX); - // Build the template string let mut template_str = String::new(); - if intel_syntax { - template_str.push_str(".intel_syntax\n"); + + // On X86 platforms there are two assembly syntaxes. Rust uses intel by default, + // but AT&T can be specified explicitly. + if matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) { + if options.contains(InlineAsmOptions::ATT_SYNTAX) { + template_str.push_str(".att_syntax\n") + } else { + template_str.push_str(".intel_syntax\n") + } } + for piece in template { match *piece { InlineAsmTemplatePiece::String(ref s) => template_str.push_str(s), @@ -431,7 +436,11 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { } } } - if intel_syntax { + + // Just to play it safe, if intel was used, reset the assembly syntax to att. + if matches!(asm_arch, InlineAsmArch::X86 | InlineAsmArch::X86_64) + && !options.contains(InlineAsmOptions::ATT_SYNTAX) + { template_str.push_str("\n.att_syntax\n"); } @@ -462,10 +471,6 @@ pub(crate) fn inline_asm_call<'ll>( dest: Option<&'ll llvm::BasicBlock>, catch_funclet: Option<(&'ll llvm::BasicBlock, Option<&Funclet<'ll>>)>, ) -> Option<&'ll Value> { - let volatile = if volatile { llvm::True } else { llvm::False }; - let alignstack = if alignstack { llvm::True } else { llvm::False }; - let can_throw = if unwind { llvm::True } else { llvm::False }; - let argtys = inputs .iter() .map(|v| { @@ -492,10 +497,10 @@ pub(crate) fn inline_asm_call<'ll>( asm.len(), cons.as_ptr(), cons.len(), - volatile, - alignstack, + volatile.to_llvm_bool(), + alignstack.to_llvm_bool(), dia, - can_throw, + unwind.to_llvm_bool(), ) }; diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index 1ea5a062254..5affb26483a 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -1,6 +1,6 @@ //! 
Set and unset common attributes on LLVM values. -use rustc_attr_data_structures::{InlineAttr, InstructionSetAttr, OptimizeAttr}; use rustc_codegen_ssa::traits::*; +use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr}; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFunctionEntry}; use rustc_middle::ty::{self, TyCtxt}; @@ -28,22 +28,6 @@ pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[ } } -pub(crate) fn has_attr(llfn: &Value, idx: AttributePlace, attr: AttributeKind) -> bool { - llvm::HasAttributeAtIndex(llfn, idx, attr) -} - -pub(crate) fn has_string_attr(llfn: &Value, name: &str) -> bool { - llvm::HasStringAttribute(llfn, name) -} - -pub(crate) fn remove_from_llfn(llfn: &Value, place: AttributePlace, kind: AttributeKind) { - llvm::RemoveRustEnumAttributeAtIndex(llfn, place, kind); -} - -pub(crate) fn remove_string_attr_from_llfn(llfn: &Value, name: &str) { - llvm::RemoveStringAttrFromFn(llfn, name); -} - /// Get LLVM attribute for the provided inline heuristic. #[inline] fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll Attribute> { @@ -344,7 +328,7 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( llfn: &'ll Value, instance: ty::Instance<'tcx>, ) { - let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); + let codegen_fn_attrs = cx.tcx.codegen_instance_attrs(instance.def); let mut to_add = SmallVec::<[_; 16]>::new(); @@ -436,6 +420,16 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED) { to_add.push(create_alloc_family_attr(cx.llcx)); + if let Some(zv) = + cx.tcx.get_attr(instance.def_id(), rustc_span::sym::rustc_allocator_zeroed_variant) + && let Some(name) = zv.value_str() + { + to_add.push(llvm::CreateAttrStringValue( + cx.llcx, + "alloc-variant-zeroed", + &mangle_internal_symbol(cx.tcx, name.as_str()), + )); + } // apply to argument place instead of function let alloc_align = AttributeKind::AllocAlign.create_attr(cx.llcx); attributes::apply_to_llfn(llfn, AttributePlace::Argument(1), &[alloc_align]); @@ -513,7 +507,7 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-module", module)); let name = - codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id())); + codegen_fn_attrs.symbol_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id())); let name = name.as_str(); to_add.push(llvm::CreateAttrStringValue(cx.llcx, "wasm-import-name", name)); } diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index 0a161442933..7a340ae83f3 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -1,104 +1,21 @@ //! 
A helper class for dealing with static archives -use std::ffi::{CStr, CString, c_char, c_void}; -use std::path::{Path, PathBuf}; -use std::{io, mem, ptr, str}; +use std::ffi::{CStr, c_char, c_void}; +use std::io; use rustc_codegen_ssa::back::archive::{ - ArArchiveBuilder, ArchiveBuildFailure, ArchiveBuilder, ArchiveBuilderBuilder, - DEFAULT_OBJECT_READER, ObjectReader, UnknownArchiveKind, try_extract_macho_fat_archive, + ArArchiveBuilder, ArchiveBuilder, ArchiveBuilderBuilder, DEFAULT_OBJECT_READER, ObjectReader, }; use rustc_session::Session; -use crate::llvm::archive_ro::{ArchiveRO, Child}; -use crate::llvm::{self, ArchiveKind, last_error}; - -/// Helper for adding many files to an archive. -#[must_use = "must call build() to finish building the archive"] -pub(crate) struct LlvmArchiveBuilder<'a> { - sess: &'a Session, - additions: Vec<Addition>, -} - -enum Addition { - File { path: PathBuf, name_in_archive: String }, - Archive { path: PathBuf, archive: ArchiveRO, skip: Box<dyn FnMut(&str) -> bool> }, -} - -impl Addition { - fn path(&self) -> &Path { - match self { - Addition::File { path, .. } | Addition::Archive { path, .. } => path, - } - } -} - -fn is_relevant_child(c: &Child<'_>) -> bool { - match c.name() { - Some(name) => !name.contains("SYMDEF"), - None => false, - } -} - -impl<'a> ArchiveBuilder for LlvmArchiveBuilder<'a> { - fn add_archive( - &mut self, - archive: &Path, - skip: Box<dyn FnMut(&str) -> bool + 'static>, - ) -> io::Result<()> { - let mut archive = archive.to_path_buf(); - if self.sess.target.llvm_target.contains("-apple-macosx") { - if let Some(new_archive) = try_extract_macho_fat_archive(self.sess, &archive)? { - archive = new_archive - } - } - let archive_ro = match ArchiveRO::open(&archive) { - Ok(ar) => ar, - Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)), - }; - if self.additions.iter().any(|ar| ar.path() == archive) { - return Ok(()); - } - self.additions.push(Addition::Archive { - path: archive, - archive: archive_ro, - skip: Box::new(skip), - }); - Ok(()) - } - - /// Adds an arbitrary file to this archive - fn add_file(&mut self, file: &Path) { - let name = file.file_name().unwrap().to_str().unwrap(); - self.additions - .push(Addition::File { path: file.to_path_buf(), name_in_archive: name.to_owned() }); - } - - /// Combine the provided files, rlibs, and native libraries into a single - /// `Archive`. - fn build(mut self: Box<Self>, output: &Path) -> bool { - match self.build_with_llvm(output) { - Ok(any_members) => any_members, - Err(error) => { - self.sess.dcx().emit_fatal(ArchiveBuildFailure { path: output.to_owned(), error }) - } - } - } -} +use crate::llvm; pub(crate) struct LlvmArchiveBuilderBuilder; impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder + 'a> { - // Keeping LlvmArchiveBuilder around in case of a regression caused by using - // ArArchiveBuilder. - // FIXME(#128955) remove a couple of months after #128936 gets merged in case - // no regression is found. - if false { - Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() }) - } else { - Box::new(ArArchiveBuilder::new(sess, &LLVM_OBJECT_READER)) - } + // Use the `object` crate to build archives, with a little bit of help from LLVM. 
+ Box::new(ArArchiveBuilder::new(sess, &LLVM_OBJECT_READER)) } } @@ -178,91 +95,3 @@ fn llvm_is_64_bit_object_file(buf: &[u8]) -> bool { fn llvm_is_ec_object_file(buf: &[u8]) -> bool { unsafe { llvm::LLVMRustIsECObject(buf.as_ptr(), buf.len()) } } - -impl<'a> LlvmArchiveBuilder<'a> { - fn build_with_llvm(&mut self, output: &Path) -> io::Result<bool> { - let kind = &*self.sess.target.archive_format; - let kind = kind - .parse::<ArchiveKind>() - .map_err(|_| kind) - .unwrap_or_else(|kind| self.sess.dcx().emit_fatal(UnknownArchiveKind { kind })); - - let mut additions = mem::take(&mut self.additions); - // Values in the `members` list below will contain pointers to the strings allocated here. - // So they need to get dropped after all elements of `members` get freed. - let mut strings = Vec::new(); - let mut members = Vec::new(); - - let dst = CString::new(output.to_str().unwrap())?; - - unsafe { - for addition in &mut additions { - match addition { - Addition::File { path, name_in_archive } => { - let path = CString::new(path.to_str().unwrap())?; - let name = CString::new(name_in_archive.as_bytes())?; - members.push(llvm::LLVMRustArchiveMemberNew( - path.as_ptr(), - name.as_ptr(), - None, - )); - strings.push(path); - strings.push(name); - } - Addition::Archive { archive, skip, .. } => { - for child in archive.iter() { - let child = child.map_err(string_to_io_error)?; - if !is_relevant_child(&child) { - continue; - } - let child_name = child.name().unwrap(); - if skip(child_name) { - continue; - } - - // It appears that LLVM's archive writer is a little - // buggy if the name we pass down isn't just the - // filename component, so chop that off here and - // pass it in. - // - // See LLVM bug 25877 for more info. - let child_name = - Path::new(child_name).file_name().unwrap().to_str().unwrap(); - let name = CString::new(child_name)?; - let m = llvm::LLVMRustArchiveMemberNew( - ptr::null(), - name.as_ptr(), - Some(child.raw), - ); - members.push(m); - strings.push(name); - } - } - } - } - - let r = llvm::LLVMRustWriteArchive( - dst.as_ptr(), - members.len() as libc::size_t, - members.as_ptr() as *const &_, - true, - kind, - self.sess.target.arch == "arm64ec", - ); - let ret = if r.into_result().is_err() { - let msg = last_error().unwrap_or_else(|| "failed to write archive".into()); - Err(io::Error::new(io::ErrorKind::Other, msg)) - } else { - Ok(!members.is_empty()) - }; - for member in members { - llvm::LLVMRustArchiveMemberFree(member); - } - ret - } - } -} - -fn string_to_io_error(s: String) -> io::Error { - io::Error::new(io::ErrorKind::Other, format!("bad archive: {s}")) -} diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 9c62244f3c9..fc38c4f3e51 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -1,80 +1,83 @@ use std::collections::BTreeMap; use std::ffi::{CStr, CString}; use std::fs::File; -use std::path::Path; +use std::path::{Path, PathBuf}; use std::ptr::NonNull; use std::sync::Arc; use std::{io, iter, slice}; use object::read::archive::ArchiveFile; -use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared}; -use rustc_codegen_ssa::back::symbol_export; +use object::{Object, ObjectSection}; +use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule, ThinShared}; use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, 
looks_like_rust_object_file}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::memmap::Mmap; -use rustc_errors::{DiagCtxtHandle, FatalError}; -use rustc_hir::def_id::LOCAL_CRATE; +use rustc_errors::DiagCtxtHandle; +use rustc_hir::attrs::SanitizerSet; use rustc_middle::bug; use rustc_middle::dep_graph::WorkProduct; -use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel}; -use rustc_session::config::{self, CrateType, Lto}; +use rustc_session::config::{self, Lto}; use tracing::{debug, info}; use crate::back::write::{ self, CodegenDiagnosticsStage, DiagnosticHandlers, bitcode_section_name, save_temp_bitcode, }; -use crate::errors::{ - DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro, -}; -use crate::llvm::AttributePlace::Function; +use crate::errors::{LlvmError, LtoBitcodeFromRlib}; use crate::llvm::{self, build_string}; -use crate::{LlvmCodegenBackend, ModuleLlvm, SimpleCx, attributes}; +use crate::{LlvmCodegenBackend, ModuleLlvm, SimpleCx}; /// We keep track of the computed LTO cache keys from the previous /// session to determine which CGUs we can reuse. const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; -fn crate_type_allows_lto(crate_type: CrateType) -> bool { - match crate_type { - CrateType::Executable - | CrateType::Dylib - | CrateType::Staticlib - | CrateType::Cdylib - | CrateType::ProcMacro - | CrateType::Sdylib => true, - CrateType::Rlib => false, - } -} - fn prepare_lto( cgcx: &CodegenContext<LlvmCodegenBackend>, + exported_symbols_for_lto: &[String], + each_linked_rlib_for_lto: &[PathBuf], dcx: DiagCtxtHandle<'_>, -) -> Result<(Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>), FatalError> { - let export_threshold = match cgcx.lto { - // We're just doing LTO for our one crate - Lto::ThinLocal => SymbolExportLevel::Rust, +) -> (Vec<CString>, Vec<(SerializedModule<ModuleBuffer>, CString)>) { + let mut symbols_below_threshold = exported_symbols_for_lto + .iter() + .map(|symbol| CString::new(symbol.to_owned()).unwrap()) + .collect::<Vec<CString>>(); - // We're doing LTO for the entire crate graph - Lto::Fat | Lto::Thin => symbol_export::crates_export_threshold(&cgcx.crate_types), + if cgcx.regular_module_config.instrument_coverage + || cgcx.regular_module_config.pgo_gen.enabled() + { + // These are weak symbols that point to the profile version and the + // profile name, which need to be treated as exported so LTO doesn't nix + // them. + const PROFILER_WEAK_SYMBOLS: [&CStr; 2] = + [c"__llvm_profile_raw_version", c"__llvm_profile_filename"]; - Lto::No => panic!("didn't request LTO but we're doing LTO"), - }; + symbols_below_threshold.extend(PROFILER_WEAK_SYMBOLS.iter().map(|&sym| sym.to_owned())); + } - let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| { - if info.level.is_below_threshold(export_threshold) || info.used { - Some(CString::new(name.as_str()).unwrap()) - } else { - None + if cgcx.regular_module_config.sanitizer.contains(SanitizerSet::MEMORY) { + let mut msan_weak_symbols = Vec::new(); + + // Similar to profiling, preserve weak msan symbol during LTO. 
+ if cgcx.regular_module_config.sanitizer_recover.contains(SanitizerSet::MEMORY) { + msan_weak_symbols.push(c"__msan_keep_going"); + } + + if cgcx.regular_module_config.sanitizer_memory_track_origins != 0 { + msan_weak_symbols.push(c"__msan_track_origins"); } - }; - let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO"); - let mut symbols_below_threshold = { - let _timer = cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold"); - exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>() - }; - info!("{} symbols to preserve in this crate", symbols_below_threshold.len()); + + symbols_below_threshold.extend(msan_weak_symbols.into_iter().map(|sym| sym.to_owned())); + } + + // Preserve LLVM-injected, ASAN-related symbols. + // See also https://github.com/rust-lang/rust/issues/113404. + symbols_below_threshold.push(c"___asan_globals_registered".to_owned()); + + // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to + // __llvm_profile_runtime, therefore we won't know until link time if this symbol + // should have default visibility. + symbols_below_threshold.push(c"__llvm_profile_counter_bias".to_owned()); // If we're performing LTO for the entire crate graph, then for each of our // upstream dependencies, find the corresponding rlib and load the bitcode @@ -84,37 +87,7 @@ fn prepare_lto( // with either fat or thin LTO let mut upstream_modules = Vec::new(); if cgcx.lto != Lto::ThinLocal { - // Make sure we actually can run LTO - for crate_type in cgcx.crate_types.iter() { - if !crate_type_allows_lto(*crate_type) { - dcx.emit_err(LtoDisallowed); - return Err(FatalError); - } else if *crate_type == CrateType::Dylib { - if !cgcx.opts.unstable_opts.dylib_lto { - dcx.emit_err(LtoDylib); - return Err(FatalError); - } - } else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto { - dcx.emit_err(LtoProcMacro); - return Err(FatalError); - } - } - - if cgcx.opts.cg.prefer_dynamic && !cgcx.opts.unstable_opts.dylib_lto { - dcx.emit_err(DynamicLinkingWithLTO); - return Err(FatalError); - } - - for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() { - let exported_symbols = - cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO"); - { - let _timer = - cgcx.prof.generic_activity("LLVM_lto_generate_symbols_below_threshold"); - symbols_below_threshold - .extend(exported_symbols[&cnum].iter().filter_map(symbol_filter)); - } - + for path in each_linked_rlib_for_lto { let archive_data = unsafe { Mmap::map(std::fs::File::open(&path).expect("couldn't open rlib")) .expect("couldn't map rlib") @@ -138,20 +111,13 @@ fn prepare_lto( let module = SerializedModule::FromRlib(data.to_vec()); upstream_modules.push((module, CString::new(name).unwrap())); } - Err(e) => { - dcx.emit_err(e); - return Err(FatalError); - } + Err(e) => dcx.emit_fatal(e), } } } } - // __llvm_profile_counter_bias is pulled in at link time by an undefined reference to - // __llvm_profile_runtime, therefore we won't know until link time if this symbol - // should have default visibility. 
- symbols_below_threshold.push(c"__llvm_profile_counter_bias".to_owned()); - Ok((symbols_below_threshold, upstream_modules)) + (symbols_below_threshold, upstream_modules) } fn get_bitcode_slice_from_object_data<'a>( @@ -168,46 +134,32 @@ fn get_bitcode_slice_from_object_data<'a>( // name" which in the public API for sections gets treated as part of the section name, but // internally in MachOObjectFile.cpp gets treated separately. let section_name = bitcode_section_name(cgcx).to_str().unwrap().trim_start_matches("__LLVM,"); - let mut len = 0; - let data = unsafe { - llvm::LLVMRustGetSliceFromObjectDataByName( - obj.as_ptr(), - obj.len(), - section_name.as_ptr(), - section_name.len(), - &mut len, - ) - }; - if !data.is_null() { - assert!(len != 0); - let bc = unsafe { slice::from_raw_parts(data, len) }; - // `bc` must be a sub-slice of `obj`. - assert!(obj.as_ptr() <= bc.as_ptr()); - assert!(bc[bc.len()..bc.len()].as_ptr() <= obj[obj.len()..obj.len()].as_ptr()); + let obj = + object::File::parse(obj).map_err(|err| LtoBitcodeFromRlib { err: err.to_string() })?; - Ok(bc) - } else { - assert!(len == 0); - Err(LtoBitcodeFromRlib { - llvm_err: llvm::last_error().unwrap_or_else(|| "unknown LLVM error".to_string()), - }) - } + let section = obj + .section_by_name(section_name) + .ok_or_else(|| LtoBitcodeFromRlib { err: format!("Can't find section {section_name}") })?; + + section.data().map_err(|err| LtoBitcodeFromRlib { err: err.to_string() }) } /// Performs fat LTO by merging all modules into a single one and returning it /// for further optimization. pub(crate) fn run_fat( cgcx: &CodegenContext<LlvmCodegenBackend>, + exported_symbols_for_lto: &[String], + each_linked_rlib_for_lto: &[PathBuf], modules: Vec<FatLtoInput<LlvmCodegenBackend>>, - cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, -) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> { +) -> ModuleCodegen<ModuleLlvm> { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); - let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, dcx)?; + let (symbols_below_threshold, upstream_modules) = + prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx); let symbols_below_threshold = symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>(); - fat_lto(cgcx, dcx, modules, cached_modules, upstream_modules, &symbols_below_threshold) + fat_lto(cgcx, dcx, modules, upstream_modules, &symbols_below_threshold) } /// Performs thin LTO by performing necessary global analysis and returning two @@ -215,12 +167,15 @@ pub(crate) fn run_fat( /// can simply be copied over from the incr. comp. cache. 
pub(crate) fn run_thin( cgcx: &CodegenContext<LlvmCodegenBackend>, + exported_symbols_for_lto: &[String], + each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, ThinBuffer)>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, -) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, Vec<WorkProduct>), FatalError> { +) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); - let (symbols_below_threshold, upstream_modules) = prepare_lto(cgcx, dcx)?; + let (symbols_below_threshold, upstream_modules) = + prepare_lto(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, dcx); let symbols_below_threshold = symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>(); if cgcx.opts.cg.linker_plugin_lto.enabled() { @@ -245,10 +200,9 @@ fn fat_lto( cgcx: &CodegenContext<LlvmCodegenBackend>, dcx: DiagCtxtHandle<'_>, modules: Vec<FatLtoInput<LlvmCodegenBackend>>, - cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, symbols_below_threshold: &[*const libc::c_char], -) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> { +) -> ModuleCodegen<ModuleLlvm> { let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module"); info!("going for a fat lto"); @@ -258,21 +212,12 @@ fn fat_lto( // modules that are serialized in-memory. // * `in_memory` contains modules which are already parsed and in-memory, // such as from multi-CGU builds. - // - // All of `cached_modules` (cached from previous incremental builds) can - // immediately go onto the `serialized_modules` modules list and then we can - // split the `modules` array into these two lists. let mut in_memory = Vec::new(); - serialized_modules.extend(cached_modules.into_iter().map(|(buffer, wp)| { - info!("pushing cached module {:?}", wp.cgu_name); - (buffer, CString::new(wp.cgu_name).unwrap()) - })); for module in modules { match module { FatLtoInput::InMemory(m) => in_memory.push(m), FatLtoInput::Serialized { name, buffer } => { info!("pushing serialized module {:?}", name); - let buffer = SerializedModule::Local(buffer); serialized_modules.push((buffer, CString::new(name).unwrap())); } } @@ -308,7 +253,7 @@ fn fat_lto( assert!(!serialized_modules.is_empty(), "must have at least one serialized module"); let (buffer, name) = serialized_modules.remove(0); info!("no in-memory regular modules to choose from, parsing {:?}", name); - let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?; + let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx); ModuleCodegen::new_regular(name.into_string().unwrap(), llvm_module) } }; @@ -349,7 +294,9 @@ fn fat_lto( }); info!("linking {:?}", name); let data = bc_decoded.data(); - linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?; + linker + .add(data) + .unwrap_or_else(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name })); } drop(linker); save_temp_bitcode(cgcx, &module, "lto.input"); @@ -366,7 +313,7 @@ fn fat_lto( save_temp_bitcode(cgcx, &module, "lto.after-restriction"); } - Ok(LtoModuleCodegen::Fat(module)) + module } pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>); @@ -436,7 +383,7 @@ fn thin_lto( serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, symbols_below_threshold: &[*const libc::c_char], -) -> Result<(Vec<LtoModuleCodegen<LlvmCodegenBackend>>, 
Vec<WorkProduct>), FatalError> { +) -> (Vec<ThinModule<LlvmCodegenBackend>>, Vec<WorkProduct>) { let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis"); unsafe { info!("going for that thin, thin LTO"); @@ -506,7 +453,7 @@ fn thin_lto( symbols_below_threshold.as_ptr(), symbols_below_threshold.len(), ) - .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?; + .unwrap_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext)); let data = ThinData(data); @@ -568,21 +515,18 @@ fn thin_lto( } info!(" - {}: re-compiled", module_name); - opt_jobs.push(LtoModuleCodegen::Thin(ThinModule { - shared: Arc::clone(&shared), - idx: module_index, - })); + opt_jobs.push(ThinModule { shared: Arc::clone(&shared), idx: module_index }); } // Save the current ThinLTO import information for the next compilation // session, overwriting the previous serialized data (if any). - if let Some(path) = key_map_path { - if let Err(err) = curr_key_map.save_to_file(&path) { - return Err(write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err })); - } + if let Some(path) = key_map_path + && let Err(err) = curr_key_map.save_to_file(&path) + { + write::llvm_err(dcx, LlvmError::WriteThinLtoKey { err }); } - Ok((opt_jobs, copy_jobs)) + (opt_jobs, copy_jobs) } } @@ -637,7 +581,7 @@ pub(crate) fn run_pass_manager( dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<ModuleLlvm>, thin: bool, -) -> Result<(), FatalError> { +) { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name); let config = cgcx.config(module.kind); @@ -657,6 +601,7 @@ pub(crate) fn run_pass_manager( // We then run the llvm_optimize function a second time, to optimize the code which we generated // in the enzyme differentiation pass. let enable_ad = config.autodiff.contains(&config::AutoDiff::Enable); + let enable_gpu = config.offload.contains(&config::Offload::Enable); let stage = if thin { write::AutodiffStage::PreAD } else { @@ -668,40 +613,21 @@ pub(crate) fn run_pass_manager( } unsafe { - write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage); } - if cfg!(llvm_enzyme) && enable_ad && !thin { + if enable_gpu && !thin { let cx = SimpleCx::new(module.module_llvm.llmod(), &module.module_llvm.llcx, cgcx.pointer_size); + crate::builder::gpu_offload::handle_gpu_code(cgcx, &cx); + } - for function in cx.get_functions() { - let enzyme_marker = "enzyme_marker"; - if attributes::has_string_attr(function, enzyme_marker) { - // Sanity check: Ensure 'noinline' is present before replacing it. 
- assert!( - !attributes::has_attr(function, Function, llvm::AttributeKind::NoInline), - "Expected __enzyme function to have 'noinline' before adding 'alwaysinline'" - ); - - attributes::remove_from_llfn(function, Function, llvm::AttributeKind::NoInline); - attributes::remove_string_attr_from_llfn(function, enzyme_marker); - - assert!( - !attributes::has_string_attr(function, enzyme_marker), - "Expected function to not have 'enzyme_marker'" - ); - - let always_inline = llvm::AttributeKind::AlwaysInline.create_attr(cx.llcx); - attributes::apply_to_llfn(function, Function, &[always_inline]); - } - } - + if cfg!(llvm_enzyme) && enable_ad && !thin { let opt_stage = llvm::OptStage::FatLTO; let stage = write::AutodiffStage::PostAD; if !config.autodiff.contains(&config::AutoDiff::NoPostopt) { unsafe { - write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage); } } @@ -713,7 +639,6 @@ pub(crate) fn run_pass_manager( } debug!("lto done"); - Ok(()) } pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer); @@ -806,7 +731,7 @@ impl Drop for ThinBuffer { pub(crate) fn optimize_thin_module( thin_module: ThinModule<LlvmCodegenBackend>, cgcx: &CodegenContext<LlvmCodegenBackend>, -) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { +) -> ModuleCodegen<ModuleLlvm> { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); @@ -817,7 +742,7 @@ pub(crate) fn optimize_thin_module( // into that context. One day, however, we may do this for upstream // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. - let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx)?; + let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx); let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm); // Given that the newly created module lacks a thinlto buffer for embedding, we need to re-add it here. if cgcx.config(ModuleKind::Regular).embed_bitcode() { @@ -851,7 +776,7 @@ pub(crate) fn optimize_thin_module( .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name()); if unsafe { !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) } { - return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); + write::llvm_err(dcx, LlvmError::PrepareThinLtoModule); } save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); } @@ -862,7 +787,7 @@ pub(crate) fn optimize_thin_module( .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name()); if unsafe { !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) } { - return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); + write::llvm_err(dcx, LlvmError::PrepareThinLtoModule); } save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); } @@ -873,7 +798,7 @@ pub(crate) fn optimize_thin_module( if unsafe { !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target.raw()) } { - return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); + write::llvm_err(dcx, LlvmError::PrepareThinLtoModule); } save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); } @@ -885,11 +810,11 @@ pub(crate) fn optimize_thin_module( // little differently. 
{ info!("running thin lto passes over {}", module.name); - run_pass_manager(cgcx, dcx, &mut module, true)?; + run_pass_manager(cgcx, dcx, &mut module, true); save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); } } - Ok(module) + module } /// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys @@ -955,9 +880,9 @@ pub(crate) fn parse_module<'a>( name: &CStr, data: &[u8], dcx: DiagCtxtHandle<'_>, -) -> Result<&'a llvm::Module, FatalError> { +) -> &'a llvm::Module { unsafe { llvm::LLVMRustParseBitcodeForLTO(cx, data.as_ptr(), data.len(), name.as_ptr()) - .ok_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode)) + .unwrap_or_else(|| write::llvm_err(dcx, LlvmError::ParseBitcode)) } } diff --git a/compiler/rustc_codegen_llvm/src/back/mod.rs b/compiler/rustc_codegen_llvm/src/back/mod.rs new file mode 100644 index 00000000000..6cb89f80ab8 --- /dev/null +++ b/compiler/rustc_codegen_llvm/src/back/mod.rs @@ -0,0 +1,5 @@ +pub(crate) mod archive; +pub(crate) mod lto; +pub(crate) mod owned_target_machine; +mod profiling; +pub(crate) mod write; diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs index dfde4595590..6d8178320fe 100644 --- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs +++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs @@ -1,4 +1,5 @@ -use std::ffi::{CStr, c_char}; +use std::assert_matches::assert_matches; +use std::ffi::CStr; use std::marker::PhantomData; use std::ptr::NonNull; @@ -39,12 +40,11 @@ impl OwnedTargetMachine { debug_info_compression: &CStr, use_emulated_tls: bool, args_cstr_buff: &[u8], + use_wasm_eh: bool, ) -> Result<Self, LlvmError<'static>> { - assert!(args_cstr_buff.len() > 0); - assert!( - *args_cstr_buff.last().unwrap() == 0, - "The last character must be a null terminator." - ); + // The argument list is passed as the concatenation of one or more C strings. + // This implies that there must be a last byte, and it must be 0. + assert_matches!(args_cstr_buff, [.., b'\0'], "the last byte must be a NUL terminator"); // SAFETY: llvm::LLVMRustCreateTargetMachine copies pointed to data let tm_ptr = unsafe { @@ -70,8 +70,9 @@ impl OwnedTargetMachine { output_obj_file.as_ptr(), debug_info_compression.as_ptr(), use_emulated_tls, - args_cstr_buff.as_ptr() as *const c_char, + args_cstr_buff.as_ptr(), args_cstr_buff.len(), + use_wasm_eh, ) }; @@ -97,7 +98,7 @@ impl Drop for OwnedTargetMachine { // llvm::LLVMRustCreateTargetMachine OwnedTargetMachine is not copyable so there is no // double free or use after free. 
unsafe { - llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut()); + llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_ptr()); } } } diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index bde6a9cf4bc..7ea2ae6673b 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -15,11 +15,12 @@ use rustc_codegen_ssa::back::write::{ BitcodeSection, CodegenContext, EmitObj, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn, }; +use rustc_codegen_ssa::base::wants_wasm_eh; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_errors::{DiagCtxtHandle, FatalError, Level}; +use rustc_errors::{DiagCtxtHandle, Level}; use rustc_fs_util::{link_or_copy, path_to_c_string}; use rustc_middle::ty::TyCtxt; use rustc_session::Session; @@ -45,10 +46,10 @@ use crate::llvm::{self, DiagnosticInfo}; use crate::type_::Type; use crate::{LlvmCodegenBackend, ModuleLlvm, base, common, llvm_util}; -pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> FatalError { +pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> ! { match llvm::last_error() { - Some(llvm_err) => dcx.emit_almost_fatal(WithLlvmError(err, llvm_err)), - None => dcx.emit_almost_fatal(err), + Some(llvm_err) => dcx.emit_fatal(WithLlvmError(err, llvm_err)), + None => dcx.emit_fatal(err), } } @@ -62,7 +63,7 @@ fn write_output_file<'ll>( file_type: llvm::FileType, self_profiler_ref: &SelfProfilerRef, verify_llvm_ir: bool, -) -> Result<(), FatalError> { +) { debug!("write_output_file output={:?} dwo_output={:?}", output, dwo_output); let output_c = path_to_c_string(output); let dwo_output_c; @@ -99,7 +100,7 @@ fn write_output_file<'ll>( } } - result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output })) + result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteOutput { path: output })) } pub(crate) fn create_informational_target_machine( @@ -111,7 +112,7 @@ pub(crate) fn create_informational_target_machine( // system/tcx is set up. 
let features = llvm_util::global_llvm_features(sess, false, only_base_features); target_machine_factory(sess, config::OptLevel::No, &features)(config) - .unwrap_or_else(|err| llvm_err(sess.dcx(), err).raise()) + .unwrap_or_else(|err| llvm_err(sess.dcx(), err)) } pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine { @@ -138,7 +139,7 @@ pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTar tcx.backend_optimization_level(()), tcx.global_backend_features(()), )(config) - .unwrap_or_else(|err| llvm_err(tcx.dcx(), err).raise()) + .unwrap_or_else(|err| llvm_err(tcx.dcx(), err)) } fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) { @@ -285,6 +286,8 @@ pub(crate) fn target_machine_factory( let file_name_display_preference = sess.filename_display_preference(RemapPathScopeComponents::DEBUGINFO); + let use_wasm_eh = wants_wasm_eh(sess); + Arc::new(move |config: TargetMachineFactoryConfig| { let path_to_cstring_helper = |path: Option<PathBuf>| -> CString { let path = path.unwrap_or_default(); @@ -321,6 +324,7 @@ pub(crate) fn target_machine_factory( &debuginfo_compression, use_emulated_tls, &args_cstr_buff, + use_wasm_eh, ) }) } @@ -561,7 +565,7 @@ pub(crate) unsafe fn llvm_optimize( opt_level: config::OptLevel, opt_stage: llvm::OptStage, autodiff_stage: AutodiffStage, -) -> Result<(), FatalError> { +) { // Enzyme: // The whole point of compiler based AD is to differentiate optimized IR instead of unoptimized // source code. However, benchmarks show that optimizations increasing the code size @@ -700,7 +704,7 @@ pub(crate) unsafe fn llvm_optimize( llvm_plugins.len(), ) }; - result.into_result().map_err(|()| llvm_err(dcx, LlvmError::RunLlvmPasses)) + result.into_result().unwrap_or_else(|()| llvm_err(dcx, LlvmError::RunLlvmPasses)) } // Unsafe due to LLVM calls. @@ -709,7 +713,7 @@ pub(crate) fn optimize( dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<ModuleLlvm>, config: &ModuleConfig, -) -> Result<(), FatalError> { +) { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name); let llcx = &*module.module_llvm.llcx; @@ -761,7 +765,7 @@ pub(crate) fn optimize( opt_stage, autodiff_stage, ) - }?; + }; if let Some(thin_lto_buffer) = thin_lto_buffer { let thin_lto_buffer = unsafe { ThinBuffer::from_raw_ptr(thin_lto_buffer) }; module.thin_lto_buffer = Some(thin_lto_buffer.data().to_vec()); @@ -789,38 +793,16 @@ pub(crate) fn optimize( } } } - Ok(()) -} - -pub(crate) fn link( - cgcx: &CodegenContext<LlvmCodegenBackend>, - dcx: DiagCtxtHandle<'_>, - mut modules: Vec<ModuleCodegen<ModuleLlvm>>, -) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { - use super::lto::{Linker, ModuleBuffer}; - // Sort the modules by name to ensure deterministic behavior. - modules.sort_by(|a, b| a.name.cmp(&b.name)); - let (first, elements) = - modules.split_first().expect("Bug! 
modules must contain at least one module."); - - let mut linker = Linker::new(first.module_llvm.llmod()); - for module in elements { - let _timer = cgcx.prof.generic_activity_with_arg("LLVM_link_module", &*module.name); - let buffer = ModuleBuffer::new(module.module_llvm.llmod()); - linker - .add(buffer.data()) - .map_err(|()| llvm_err(dcx, LlvmError::SerializeModule { name: &module.name }))?; - } - drop(linker); - Ok(modules.remove(0)) } pub(crate) fn codegen( cgcx: &CodegenContext<LlvmCodegenBackend>, - dcx: DiagCtxtHandle<'_>, module: ModuleCodegen<ModuleLlvm>, config: &ModuleConfig, -) -> Result<CompiledModule, FatalError> { +) -> CompiledModule { + let dcx = cgcx.create_dcx(); + let dcx = dcx.handle(); + let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name); { let llmod = module.module_llvm.llmod(); @@ -879,9 +861,7 @@ pub(crate) fn codegen( .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name); let thin_bc = module.thin_lto_buffer.as_deref().expect("cannot find embedded bitcode"); - unsafe { - embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, &thin_bc); - } + embed_bitcode(cgcx, llcx, llmod, &thin_bc); } } @@ -928,7 +908,9 @@ pub(crate) fn codegen( record_artifact_size(&cgcx.prof, "llvm_ir", &out); } - result.into_result().map_err(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out }))?; + result + .into_result() + .unwrap_or_else(|()| llvm_err(dcx, LlvmError::WriteIr { path: &out })); } if config.emit_asm { @@ -945,7 +927,7 @@ pub(crate) fn codegen( // binaries. So we must clone the module to produce the asm output // if we are also producing object code. let llmod = if let EmitObj::ObjectCode(_) = config.emit_obj { - unsafe { llvm::LLVMCloneModule(llmod) } + llvm::LLVMCloneModule(llmod) } else { llmod }; @@ -959,7 +941,7 @@ pub(crate) fn codegen( llvm::FileType::AssemblyFile, &cgcx.prof, config.verify_llvm_ir, - )?; + ); } match config.emit_obj { @@ -995,7 +977,7 @@ pub(crate) fn codegen( llvm::FileType::ObjectFile, &cgcx.prof, config.verify_llvm_ir, - )?; + ); } EmitObj::Bitcode => { @@ -1028,7 +1010,7 @@ pub(crate) fn codegen( && cgcx.target_can_use_split_dwarf && cgcx.split_debuginfo != SplitDebuginfo::Off && cgcx.split_dwarf_kind == SplitDwarfKind::Split; - Ok(module.into_compiled_module( + module.into_compiled_module( config.emit_obj != EmitObj::None, dwarf_object_emitted, config.emit_bc, @@ -1036,7 +1018,7 @@ pub(crate) fn codegen( config.emit_ir, &cgcx.output_filenames, cgcx.invocation_temp.as_deref(), - )) + ) } fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: &[u8]) -> Vec<u8> { @@ -1073,11 +1055,10 @@ pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> } /// Embed the bitcode of an LLVM module for LTO in the LLVM module itself. -unsafe fn embed_bitcode( +fn embed_bitcode( cgcx: &CodegenContext<LlvmCodegenBackend>, llcx: &llvm::Context, llmod: &llvm::Module, - cmdline: &str, bitcode: &[u8], ) { // We're adding custom sections to the output object file, but we definitely @@ -1093,7 +1074,9 @@ unsafe fn embed_bitcode( // * Mach-O - this is for macOS. Inspecting the source code for the native // linker here shows that the `.llvmbc` and `.llvmcmd` sections are // automatically skipped by the linker. In that case there's nothing extra - // that we need to do here. + // that we need to do here. We do need to make sure that the + // `__LLVM,__cmdline` section exists even though it is empty as otherwise + // ld64 rejects the object file. 
// // * Wasm - the native LLD linker is hard-coded to skip `.llvmbc` and // `.llvmcmd` sections, so there's nothing extra we need to do. @@ -1115,43 +1098,40 @@ unsafe fn embed_bitcode( // Unfortunately, LLVM provides no way to set custom section flags. For ELF // and COFF we emit the sections using module level inline assembly for that // reason (see issue #90326 for historical background). - unsafe { - if cgcx.target_is_like_darwin - || cgcx.target_is_like_aix - || cgcx.target_arch == "wasm32" - || cgcx.target_arch == "wasm64" - { - // We don't need custom section flags, create LLVM globals. - let llconst = common::bytes_in_context(llcx, bitcode); - let llglobal = - llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.module"); - llvm::set_initializer(llglobal, llconst); - - llvm::set_section(llglobal, bitcode_section_name(cgcx)); - llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); - llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - - let llconst = common::bytes_in_context(llcx, cmdline.as_bytes()); - let llglobal = - llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.cmdline"); - llvm::set_initializer(llglobal, llconst); - let section = if cgcx.target_is_like_darwin { - c"__LLVM,__cmdline" - } else if cgcx.target_is_like_aix { - c".info" - } else { - c".llvmcmd" - }; - llvm::set_section(llglobal, section); - llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); + + if cgcx.target_is_like_darwin + || cgcx.target_is_like_aix + || cgcx.target_arch == "wasm32" + || cgcx.target_arch == "wasm64" + { + // We don't need custom section flags, create LLVM globals. + let llconst = common::bytes_in_context(llcx, bitcode); + let llglobal = llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.module"); + llvm::set_initializer(llglobal, llconst); + + llvm::set_section(llglobal, bitcode_section_name(cgcx)); + llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); + llvm::LLVMSetGlobalConstant(llglobal, llvm::TRUE); + + let llconst = common::bytes_in_context(llcx, &[]); + let llglobal = llvm::add_global(llmod, common::val_ty(llconst), c"rustc.embedded.cmdline"); + llvm::set_initializer(llglobal, llconst); + let section = if cgcx.target_is_like_darwin { + c"__LLVM,__cmdline" + } else if cgcx.target_is_like_aix { + c".info" } else { - // We need custom section flags, so emit module-level inline assembly. - let section_flags = if cgcx.is_pe_coff { "n" } else { "e" }; - let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode); - llvm::append_module_inline_asm(llmod, &asm); - let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes()); - llvm::append_module_inline_asm(llmod, &asm); - } + c".llvmcmd" + }; + llvm::set_section(llglobal, section); + llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); + } else { + // We need custom section flags, so emit module-level inline assembly. + let section_flags = if cgcx.is_pe_coff { "n" } else { "e" }; + let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode); + llvm::append_module_inline_asm(llmod, &asm); + let asm = create_section_with_flags_asm(".llvmcmd", section_flags, &[]); + llvm::append_module_inline_asm(llmod, &asm); } } @@ -1182,7 +1162,7 @@ fn create_msvc_imps( .filter_map(|val| { // Exclude some symbols that we know are not Rust symbols. 
let name = llvm::get_value_name(val); - if ignored(name) { None } else { Some((val, name)) } + if ignored(&name) { None } else { Some((val, name)) } }) .map(move |(val, name)| { let mut imp_name = prefix.as_bytes().to_vec(); diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index 5dda836988c..9cc5d8dbc21 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -18,9 +18,10 @@ use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::*; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_hir::attrs::Linkage; use rustc_middle::dep_graph; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; -use rustc_middle::mir::mono::{Linkage, Visibility}; +use rustc_middle::mir::mono::Visibility; use rustc_middle::ty::TyCtxt; use rustc_session::config::DebugInfo; use rustc_span::Symbol; diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index d0aa7320b4b..37379586d58 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -3,6 +3,7 @@ use std::ops::Deref; use std::{iter, ptr}; pub(crate) mod autodiff; +pub(crate) mod gpu_offload; use libc::{c_char, c_uint, size_t}; use rustc_abi as abi; @@ -14,6 +15,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_data_structures::small_c_str::SmallCStr; use rustc_hir::def_id::DefId; +use rustc_middle::bug; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{ FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasTypingEnv, LayoutError, LayoutOfHelpers, @@ -23,7 +25,7 @@ use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; use rustc_sanitizers::{cfi, kcfi}; use rustc_session::config::OptLevel; use rustc_span::Span; -use rustc_target::callconv::FnAbi; +use rustc_target::callconv::{FnAbi, PassMode}; use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target}; use smallvec::SmallVec; use tracing::{debug, instrument}; @@ -33,7 +35,7 @@ use crate::attributes; use crate::common::Funclet; use crate::context::{CodegenCx, FullCx, GenericCx, SCx}; use crate::llvm::{ - self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, GEPNoWrapFlags, Metadata, True, + self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, GEPNoWrapFlags, Metadata, TRUE, ToLlvmBool, }; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; @@ -117,6 +119,74 @@ impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { } bx } + + // The generic builder has less functionality and thus (unlike the other alloca) we can not + // easily jump to the beginning of the function to place our allocas there. We trust the user + // to manually do that. FIXME(offload): improve the genericCx and add more llvm wrappers to + // handle this. 
+ pub(crate) fn direct_alloca(&mut self, ty: &'ll Type, align: Align, name: &str) -> &'ll Value { + let val = unsafe { + let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED); + llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); + // Cast to default addrspace if necessary + llvm::LLVMBuildPointerCast(self.llbuilder, alloca, self.cx.type_ptr(), UNNAMED) + }; + if name != "" { + let name = std::ffi::CString::new(name).unwrap(); + llvm::set_value_name(val, &name.as_bytes()); + } + val + } + + pub(crate) fn inbounds_gep( + &mut self, + ty: &'ll Type, + ptr: &'ll Value, + indices: &[&'ll Value], + ) -> &'ll Value { + unsafe { + llvm::LLVMBuildGEPWithNoWrapFlags( + self.llbuilder, + ty, + ptr, + indices.as_ptr(), + indices.len() as c_uint, + UNNAMED, + GEPNoWrapFlags::InBounds, + ) + } + } + + pub(crate) fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + debug!("Store {:?} -> {:?}", val, ptr); + assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer); + unsafe { + let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); + llvm::LLVMSetAlignment(store, align.bytes() as c_uint); + store + } + } + + pub(crate) fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value { + unsafe { + let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED); + llvm::LLVMSetAlignment(load, align.bytes() as c_uint); + load + } + } + + fn memset(&mut self, ptr: &'ll Value, fill_byte: &'ll Value, size: &'ll Value, align: Align) { + unsafe { + llvm::LLVMRustBuildMemSet( + self.llbuilder, + ptr, + align.bytes() as c_uint, + fill_byte, + size, + false, + ); + } + } } /// Empty string, to be used where LLVM expects an instruction name, indicating @@ -302,10 +372,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { return; } - let id_str = "branch_weights"; - let id = unsafe { - llvm::LLVMMDStringInContext2(self.cx.llcx, id_str.as_ptr().cast(), id_str.len()) - }; + let id = self.cx.create_metadata(b"branch_weights"); // For switch instructions with 2 targets, the `llvm.expect` intrinsic is used. // This function handles switch instructions with more than 2 targets and it needs to @@ -426,8 +493,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { unsafe { let add = llvm::LLVMBuildAdd(self.llbuilder, a, b, UNNAMED); if llvm::LLVMIsAInstruction(add).is_some() { - llvm::LLVMSetNUW(add, True); - llvm::LLVMSetNSW(add, True); + llvm::LLVMSetNUW(add, TRUE); + llvm::LLVMSetNSW(add, TRUE); } add } @@ -436,8 +503,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { unsafe { let sub = llvm::LLVMBuildSub(self.llbuilder, a, b, UNNAMED); if llvm::LLVMIsAInstruction(sub).is_some() { - llvm::LLVMSetNUW(sub, True); - llvm::LLVMSetNSW(sub, True); + llvm::LLVMSetNUW(sub, TRUE); + llvm::LLVMSetNSW(sub, TRUE); } sub } @@ -446,8 +513,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { unsafe { let mul = llvm::LLVMBuildMul(self.llbuilder, a, b, UNNAMED); if llvm::LLVMIsAInstruction(mul).is_some() { - llvm::LLVMSetNUW(mul, True); - llvm::LLVMSetNSW(mul, True); + llvm::LLVMSetNUW(mul, TRUE); + llvm::LLVMSetNSW(mul, TRUE); } mul } @@ -461,7 +528,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { // an instruction, so we need to check before setting the flag. // (See also `LLVMBuildNUWNeg` which also needs a check.) 
if llvm::LLVMIsAInstruction(or).is_some() { - llvm::LLVMSetIsDisjoint(or, True); + llvm::LLVMSetIsDisjoint(or, TRUE); } or } @@ -490,13 +557,25 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let (size, signed) = ty.int_size_and_signed(self.tcx); let width = size.bits(); - if oop == OverflowOp::Sub && !signed { - // Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these - // to be the canonical form. It will attempt to reform llvm.usub.with.overflow - // in the backend if profitable. - let sub = self.sub(lhs, rhs); - let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs); - return (sub, cmp); + if !signed { + match oop { + OverflowOp::Sub => { + // Emit sub and icmp instead of llvm.usub.with.overflow. LLVM considers these + // to be the canonical form. It will attempt to reform llvm.usub.with.overflow + // in the backend if profitable. + let sub = self.sub(lhs, rhs); + let cmp = self.icmp(IntPredicate::IntULT, lhs, rhs); + return (sub, cmp); + } + OverflowOp::Add => { + // Like with sub above, using icmp is the preferred form. See + // <https://rust-lang.zulipchat.com/#narrow/channel/187780-t-compiler.2Fllvm/topic/.60uadd.2Ewith.2Eoverflow.60.20.28again.29/near/533041085> + let add = self.add(lhs, rhs); + let cmp = self.icmp(IntPredicate::IntULT, add, lhs); + return (add, cmp); + } + OverflowOp::Mul => {} + } } let oop_str = match oop { @@ -538,16 +617,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - fn dynamic_alloca(&mut self, size: &'ll Value, align: Align) -> &'ll Value { - unsafe { - let alloca = - llvm::LLVMBuildArrayAlloca(self.llbuilder, self.cx().type_i8(), size, UNNAMED); - llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); - // Cast to default addrspace if necessary - llvm::LLVMBuildPointerCast(self.llbuilder, alloca, self.cx().type_ptr(), UNNAMED) - } - } - fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value { unsafe { let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED); @@ -560,7 +629,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value { unsafe { let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED); - llvm::LLVMSetVolatile(load, llvm::True); + llvm::LLVMSetVolatile(load, llvm::TRUE); load } } @@ -631,10 +700,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { bx.nonnull_metadata(load); } - if let Some(pointee) = layout.pointee_info_at(bx, offset) { - if let Some(_) = pointee.safe { - bx.align_metadata(load, pointee.align); - } + if let Some(pointee) = layout.pointee_info_at(bx, offset) + && let Some(_) = pointee.safe + { + bx.align_metadata(load, pointee.align); } } abi::Primitive::Float(_) => {} @@ -647,17 +716,16 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } else if place.layout.is_llvm_immediate() { let mut const_llval = None; let llty = place.layout.llvm_type(self); - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - if let Some(init) = llvm::LLVMGetInitializer(global) { - if self.val_ty(init) == llty { - const_llval = Some(init); - } + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) { + if llvm::LLVMIsGlobalConstant(global).is_true() { + if let Some(init) = llvm::LLVMGetInitializer(global) { + if self.val_ty(init) == llty { + const_llval = Some(init); } } } } + let 
llval = const_llval.unwrap_or_else(|| { let load = self.load(llty, place.val.llval, place.val.align); if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr { @@ -770,7 +838,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() as c_uint }; llvm::LLVMSetAlignment(store, align); if flags.contains(MemFlags::VOLATILE) { - llvm::LLVMSetVolatile(store, llvm::True); + llvm::LLVMSetVolatile(store, llvm::TRUE); } if flags.contains(MemFlags::NONTEMPORAL) { // Make sure that the current target architectures supports "sane" non-temporal @@ -888,7 +956,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let trunc = self.trunc(val, dest_ty); unsafe { if llvm::LLVMIsAInstruction(trunc).is_some() { - llvm::LLVMSetNUW(trunc, True); + llvm::LLVMSetNUW(trunc, TRUE); } } trunc @@ -900,7 +968,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let trunc = self.trunc(val, dest_ty); unsafe { if llvm::LLVMIsAInstruction(trunc).is_some() { - llvm::LLVMSetNSW(trunc, True); + llvm::LLVMSetNSW(trunc, TRUE); } } trunc @@ -999,13 +1067,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { unsafe { - llvm::LLVMBuildIntCast2( - self.llbuilder, - val, - dest_ty, - if is_signed { True } else { False }, - UNNAMED, - ) + llvm::LLVMBuildIntCast2(self.llbuilder, val, dest_ty, is_signed.to_llvm_bool(), UNNAMED) } } @@ -1161,7 +1223,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false); let landing_pad = self.landing_pad(ty, pers_fn, 0); unsafe { - llvm::LLVMSetCleanup(landing_pad, llvm::True); + llvm::LLVMSetCleanup(landing_pad, llvm::TRUE); } (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1)) } @@ -1249,7 +1311,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { failure_order: rustc_middle::ty::AtomicOrdering, weak: bool, ) -> (&'ll Value, &'ll Value) { - let weak = if weak { llvm::True } else { llvm::False }; unsafe { let value = llvm::LLVMBuildAtomicCmpXchg( self.llbuilder, @@ -1258,9 +1319,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { src, AtomicOrdering::from_generic(order), AtomicOrdering::from_generic(failure_order), - llvm::False, // SingleThreaded + llvm::FALSE, // SingleThreaded ); - llvm::LLVMSetWeak(value, weak); + llvm::LLVMSetWeak(value, weak.to_llvm_bool()); let val = self.extract_value(value, 0); let success = self.extract_value(value, 1); (val, success) @@ -1271,15 +1332,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { &mut self, op: rustc_codegen_ssa::common::AtomicRmwBinOp, dst: &'ll Value, - mut src: &'ll Value, + src: &'ll Value, order: rustc_middle::ty::AtomicOrdering, + ret_ptr: bool, ) -> &'ll Value { - // The only RMW operation that LLVM supports on pointers is compare-exchange. 
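Context for the change below: the new `ret_ptr` flag replaces the old silent cast of pointer operands to integers, and only the *result* conversion remains. A sketch of that post-processing step in isolation (hypothetical helper over the same builder methods):

```rust
/// After an atomic RMW that operated on a pointer-sized integer, convert the
/// result back to a pointer if the caller asked for one and LLVM returned an
/// integer.
fn cast_rmw_result<'a, 'll, 'tcx>(
    bx: &mut Builder<'a, 'll, 'tcx>,
    res: &'ll Value,
    ret_ptr: bool,
) -> &'ll Value {
    if ret_ptr && bx.val_ty(res) != bx.type_ptr() {
        bx.inttoptr(res, bx.type_ptr())
    } else {
        res
    }
}
```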
- let requires_cast_to_int = self.val_ty(src) == self.type_ptr() - && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg; - if requires_cast_to_int { - src = self.ptrtoint(src, self.type_isize()); - } + // FIXME: If `ret_ptr` is true and `src` is not a pointer, we *should* tell LLVM that the + // LHS is a pointer and the operation should be provenance-preserving, but LLVM does not + // currently support that (https://github.com/llvm/llvm-project/issues/120837). let mut res = unsafe { llvm::LLVMBuildAtomicRMW( self.llbuilder, @@ -1287,10 +1346,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { dst, src, AtomicOrdering::from_generic(order), - llvm::False, // SingleThreaded + llvm::FALSE, // SingleThreaded ) }; - if requires_cast_to_int { + if ret_ptr && self.val_ty(res) != self.type_ptr() { res = self.inttoptr(res, self.type_ptr()); } res @@ -1302,14 +1361,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { scope: SynchronizationScope, ) { let single_threaded = match scope { - SynchronizationScope::SingleThread => llvm::True, - SynchronizationScope::CrossThread => llvm::False, + SynchronizationScope::SingleThread => true, + SynchronizationScope::CrossThread => false, }; unsafe { llvm::LLVMBuildFence( self.llbuilder, AtomicOrdering::from_generic(order), - single_threaded, + single_threaded.to_llvm_bool(), UNNAMED, ); } @@ -1376,6 +1435,28 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { call } + fn tail_call( + &mut self, + llty: Self::Type, + fn_attrs: Option<&CodegenFnAttrs>, + fn_abi: &FnAbi<'tcx, Ty<'tcx>>, + llfn: Self::Value, + args: &[Self::Value], + funclet: Option<&Self::Funclet>, + instance: Option<Instance<'tcx>>, + ) { + let call = self.call(llty, fn_attrs, Some(fn_abi), llfn, args, funclet, instance); + llvm::LLVMSetTailCallKind(call, llvm::TailCallKind::MustTail); + + match &fn_abi.ret.mode { + PassMode::Ignore | PassMode::Indirect { .. } => self.ret_void(), + PassMode::Direct(_) | PassMode::Pair { .. } => self.ret(call), + mode @ PassMode::Cast { .. } => { + bug!("Encountered `PassMode::{mode:?}` during codegen") + } + } + } + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, UNNAMED) } } @@ -1608,7 +1689,11 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { return; } - self.call_intrinsic(intrinsic, &[self.val_ty(ptr)], &[self.cx.const_u64(size), ptr]); + if crate::llvm_util::get_version() >= (22, 0, 0) { + self.call_intrinsic(intrinsic, &[self.val_ty(ptr)], &[ptr]); + } else { + self.call_intrinsic(intrinsic, &[self.val_ty(ptr)], &[self.cx.const_u64(size), ptr]); + } } } impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { @@ -1731,7 +1816,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } else { cfi::typeid_for_fnabi(self.tcx, fn_abi, options) }; - let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap(); + let typeid_metadata = self.cx.create_metadata(typeid.as_bytes()); let dbg_loc = self.get_dbg_loc(); // Test whether the function pointer is associated with the type identifier using the @@ -1808,48 +1893,4 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { ) { self.call_intrinsic("llvm.instrprof.increment", &[], &[fn_name, hash, num_counters, index]); } - - /// Emits a call to `llvm.instrprof.mcdc.parameters`. - /// - /// This doesn't produce any code directly, but is used as input by - /// the LLVM pass that handles coverage instrumentation. 
- /// - /// (See clang's [`CodeGenPGO::emitMCDCParameters`] for comparison.) - /// - /// [`CodeGenPGO::emitMCDCParameters`]: - /// https://github.com/rust-lang/llvm-project/blob/5399a24/clang/lib/CodeGen/CodeGenPGO.cpp#L1124 - #[instrument(level = "debug", skip(self))] - pub(crate) fn mcdc_parameters( - &mut self, - fn_name: &'ll Value, - hash: &'ll Value, - bitmap_bits: &'ll Value, - ) { - self.call_intrinsic("llvm.instrprof.mcdc.parameters", &[], &[fn_name, hash, bitmap_bits]); - } - - #[instrument(level = "debug", skip(self))] - pub(crate) fn mcdc_tvbitmap_update( - &mut self, - fn_name: &'ll Value, - hash: &'ll Value, - bitmap_index: &'ll Value, - mcdc_temp: &'ll Value, - ) { - let args = &[fn_name, hash, bitmap_index, mcdc_temp]; - self.call_intrinsic("llvm.instrprof.mcdc.tvbitmap.update", &[], args); - } - - #[instrument(level = "debug", skip(self))] - pub(crate) fn mcdc_condbitmap_reset(&mut self, mcdc_temp: &'ll Value) { - self.store(self.const_i32(0), mcdc_temp, self.tcx.data_layout.i32_align.abi); - } - - #[instrument(level = "debug", skip(self))] - pub(crate) fn mcdc_condbitmap_update(&mut self, cond_index: &'ll Value, mcdc_temp: &'ll Value) { - let align = self.tcx.data_layout.i32_align.abi; - let current_tv_index = self.load(self.cx.type_i32(), mcdc_temp, align); - let new_tv_index = self.add(current_tv_index, cond_index); - self.store(new_tv_index, mcdc_temp, align); - } } diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs index b07d9a5cfca..6ddf53cdc87 100644 --- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs +++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs @@ -1,41 +1,92 @@ use std::ptr; -use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, AutoDiffItem, DiffActivity, DiffMode}; -use rustc_codegen_ssa::ModuleCodegen; -use rustc_codegen_ssa::back::write::ModuleConfig; +use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, DiffActivity, DiffMode}; use rustc_codegen_ssa::common::TypeKind; -use rustc_codegen_ssa::traits::BaseTypeCodegenMethods; -use rustc_errors::FatalError; -use rustc_middle::bug; -use tracing::{debug, trace}; +use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods}; +use rustc_middle::ty::{PseudoCanonicalInput, Ty, TyCtxt, TypingEnv}; +use rustc_middle::{bug, ty}; +use tracing::debug; -use crate::back::write::llvm_err; -use crate::builder::{SBuilder, UNNAMED}; +use crate::builder::{Builder, PlaceRef, UNNAMED}; use crate::context::SimpleCx; use crate::declare::declare_simple_fn; -use crate::errors::{AutoDiffWithoutEnable, LlvmError}; -use crate::llvm::AttributePlace::Function; -use crate::llvm::{Metadata, True}; +use crate::llvm; +use crate::llvm::{Metadata, TRUE, Type}; use crate::value::Value; -use crate::{CodegenContext, LlvmCodegenBackend, ModuleLlvm, attributes, llvm}; -fn get_params(fnc: &Value) -> Vec<&Value> { - let param_num = llvm::LLVMCountParams(fnc) as usize; - let mut fnc_args: Vec<&Value> = vec![]; - fnc_args.reserve(param_num); - unsafe { - llvm::LLVMGetParams(fnc, fnc_args.as_mut_ptr()); - fnc_args.set_len(param_num); +pub(crate) fn adjust_activity_to_abi<'tcx>( + tcx: TyCtxt<'tcx>, + fn_ty: Ty<'tcx>, + da: &mut Vec<DiffActivity>, +) { + if !matches!(fn_ty.kind(), ty::FnDef(..)) { + bug!("expected fn def for autodiff, got {:?}", fn_ty); } - fnc_args -} -fn has_sret(fnc: &Value) -> bool { - let num_args = llvm::LLVMCountParams(fnc) as usize; - if num_args == 0 { - false - } else { - unsafe { llvm::LLVMRustHasAttributeAtIndex(fnc, 0, 
llvm::AttributeKind::StructRet) } + // We don't actually pass the types back into the type system. + // All we do is decide how to handle the arguments. + let sig = fn_ty.fn_sig(tcx).skip_binder(); + + let mut new_activities = vec![]; + let mut new_positions = vec![]; + for (i, ty) in sig.inputs().iter().enumerate() { + if let Some(inner_ty) = ty.builtin_deref(true) { + if inner_ty.is_slice() { + // Now we need to figure out the size of each slice element in memory to allow + // safety checks and usability improvements in the backend. + let sty = match inner_ty.builtin_index() { + Some(sty) => sty, + None => { + panic!("slice element type unknown"); + } + }; + let pci = PseudoCanonicalInput { + typing_env: TypingEnv::fully_monomorphized(), + value: sty, + }; + + let layout = tcx.layout_of(pci); + let elem_size = match layout { + Ok(layout) => layout.size, + Err(_) => { + bug!("autodiff failed to compute slice element size"); + } + }; + let elem_size: u32 = elem_size.bytes() as u32; + + // We know that the length will be passed as extra arg. + if !da.is_empty() { + // We are looking at a slice. The length of that slice will become an + // extra integer on llvm level. Integers are always const. + // However, if the slice get's duplicated, we want to know to later check the + // size. So we mark the new size argument as FakeActivitySize. + // There is one FakeActivitySize per slice, so for convenience we store the + // slice element size in bytes in it. We will use the size in the backend. + let activity = match da[i] { + DiffActivity::DualOnly + | DiffActivity::Dual + | DiffActivity::Dualv + | DiffActivity::DuplicatedOnly + | DiffActivity::Duplicated => { + DiffActivity::FakeActivitySize(Some(elem_size)) + } + DiffActivity::Const => DiffActivity::Const, + _ => bug!("unexpected activity for ptr/ref"), + }; + new_activities.push(activity); + new_positions.push(i + 1); + } + + continue; + } + } + } + // now add the extra activities coming from slices + // Reverse order to not invalidate the indices + for _ in 0..new_activities.len() { + let pos = new_positions.pop().unwrap(); + let activity = new_activities.pop().unwrap(); + da.insert(pos, activity); } } @@ -49,14 +100,13 @@ fn has_sret(fnc: &Value) -> bool { // need to match those. // FIXME(ZuseZ4): This logic is a bit more complicated than it should be, can we simplify it // using iterators and peek()? -fn match_args_from_caller_to_enzyme<'ll>( +fn match_args_from_caller_to_enzyme<'ll, 'tcx>( cx: &SimpleCx<'ll>, - builder: &SBuilder<'ll, 'll>, + builder: &mut Builder<'_, 'll, 'tcx>, width: u32, args: &mut Vec<&'ll llvm::Value>, inputs: &[DiffActivity], outer_args: &[&'ll llvm::Value], - has_sret: bool, ) { debug!("matching autodiff arguments"); // We now handle the issue that Rust level arguments not always match the llvm-ir level @@ -68,20 +118,12 @@ fn match_args_from_caller_to_enzyme<'ll>( let mut outer_pos: usize = 0; let mut activity_pos = 0; - if has_sret { - // Then the first outer arg is the sret pointer. Enzyme doesn't know about sret, so the - // inner function will still return something. 
We increase our outer_pos by one, - // and once we're done with all other args we will take the return of the inner call and - // update the sret pointer with it - outer_pos = 1; - } - - let enzyme_const = cx.create_metadata("enzyme_const".to_string()).unwrap(); - let enzyme_out = cx.create_metadata("enzyme_out".to_string()).unwrap(); - let enzyme_dup = cx.create_metadata("enzyme_dup".to_string()).unwrap(); - let enzyme_dupv = cx.create_metadata("enzyme_dupv".to_string()).unwrap(); - let enzyme_dupnoneed = cx.create_metadata("enzyme_dupnoneed".to_string()).unwrap(); - let enzyme_dupnoneedv = cx.create_metadata("enzyme_dupnoneedv".to_string()).unwrap(); + let enzyme_const = cx.create_metadata(b"enzyme_const"); + let enzyme_out = cx.create_metadata(b"enzyme_out"); + let enzyme_dup = cx.create_metadata(b"enzyme_dup"); + let enzyme_dupv = cx.create_metadata(b"enzyme_dupv"); + let enzyme_dupnoneed = cx.create_metadata(b"enzyme_dupnoneed"); + let enzyme_dupnoneedv = cx.create_metadata(b"enzyme_dupnoneedv"); while activity_pos < inputs.len() { let diff_activity = inputs[activity_pos as usize]; @@ -194,92 +236,6 @@ fn match_args_from_caller_to_enzyme<'ll>( } } -// On LLVM-IR, we can luckily declare __enzyme_ functions without specifying the input -// arguments. We do however need to declare them with their correct return type. -// We already figured the correct return type out in our frontend, when generating the outer_fn, -// so we can now just go ahead and use that. This is not always trivial, e.g. because sret. -// Beyond sret, this article describes our challenges nicely: -// <https://yorickpeterse.com/articles/the-mess-that-is-handling-structure-arguments-and-returns-in-llvm/> -// I.e. (i32, f32) will get merged into i64, but we don't handle that yet. -fn compute_enzyme_fn_ty<'ll>( - cx: &SimpleCx<'ll>, - attrs: &AutoDiffAttrs, - fn_to_diff: &'ll Value, - outer_fn: &'ll Value, -) -> &'ll llvm::Type { - let fn_ty = cx.get_type_of_global(outer_fn); - let mut ret_ty = cx.get_return_type(fn_ty); - - let has_sret = has_sret(outer_fn); - - if has_sret { - // Now we don't just forward the return type, so we have to figure it out based on the - // primal return type, in combination with the autodiff settings. - let fn_ty = cx.get_type_of_global(fn_to_diff); - let inner_ret_ty = cx.get_return_type(fn_ty); - - let void_ty = unsafe { llvm::LLVMVoidTypeInContext(cx.llcx) }; - if inner_ret_ty == void_ty { - // This indicates that even the inner function has an sret. - // Right now I only look for an sret in the outer function. - // This *probably* needs some extra handling, but I never ran - // into such a case. So I'll wait for user reports to have a test case. - bug!("sret in inner function"); - } - - if attrs.width == 1 { - // Enzyme returns a struct of style: - // `{ original_ret(if requested), float, float, ... }` - let mut struct_elements = vec![]; - if attrs.has_primal_ret() { - struct_elements.push(inner_ret_ty); - } - // Next, we push the list of active floats, since they will be lowered to `enzyme_out`, - // and therefore part of the return struct. - let param_tys = cx.func_params_types(fn_ty); - for (act, param_ty) in attrs.input_activity.iter().zip(param_tys) { - if matches!(act, DiffActivity::Active) { - // Now find the float type at position i based on the fn_ty, - // to know what (f16/f32/f64/...) to add to the struct. - struct_elements.push(param_ty); - } - } - ret_ty = cx.type_struct(&struct_elements, false); - } else { - // First we check if we also have to deal with the primal return. 
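The `enzyme_*` markers created above are interleaved with real values when the `__enzyme_*` call is assembled. For a single `Duplicated` pointer argument the shape is (marker, primal, shadow); a minimal sketch (hypothetical helper; the real loop in `match_args_from_caller_to_enzyme` also handles `width > 1` and the vectorized `*v` variants):

```rust
/// Push one `Duplicated` argument: the activity marker, then the primal
/// pointer, then its shadow. Enzyme reads the call-site arguments in this
/// (metadata, value, ...) order.
fn push_duplicated_arg<'ll>(
    cx: &SimpleCx<'ll>,
    args: &mut Vec<&'ll Value>,
    enzyme_dup: &'ll Metadata,
    primal: &'ll Value,
    shadow: &'ll Value,
) {
    args.push(cx.get_metadata_value(enzyme_dup));
    args.push(primal);
    args.push(shadow);
}
```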
- match attrs.mode { - DiffMode::Forward => match attrs.ret_activity { - DiffActivity::Dual => { - let arr_ty = - unsafe { llvm::LLVMArrayType2(inner_ret_ty, attrs.width as u64 + 1) }; - ret_ty = arr_ty; - } - DiffActivity::DualOnly => { - let arr_ty = - unsafe { llvm::LLVMArrayType2(inner_ret_ty, attrs.width as u64) }; - ret_ty = arr_ty; - } - DiffActivity::Const => { - todo!("Not sure, do we need to do something here?"); - } - _ => { - bug!("unreachable"); - } - }, - DiffMode::Reverse => { - todo!("Handle sret for reverse mode"); - } - _ => { - bug!("unreachable"); - } - } - } - } - - // LLVM can figure out the input types on it's own, so we take a shortcut here. - unsafe { llvm::LLVMFunctionType(ret_ty, ptr::null(), 0, True) } -} - /// When differentiating `fn_to_diff`, take a `outer_fn` and generate another /// function with expected naming and calling conventions[^1] which will be /// discovered by the enzyme LLVM pass and its body populated with the differentiated @@ -289,11 +245,15 @@ fn compute_enzyme_fn_ty<'ll>( /// [^1]: <https://enzyme.mit.edu/getting_started/CallingConvention/> // FIXME(ZuseZ4): `outer_fn` should include upstream safety checks to // cover some assumptions of enzyme/autodiff, which could lead to UB otherwise. -fn generate_enzyme_call<'ll>( +pub(crate) fn generate_enzyme_call<'ll, 'tcx>( + builder: &mut Builder<'_, 'll, 'tcx>, cx: &SimpleCx<'ll>, fn_to_diff: &'ll Value, - outer_fn: &'ll Value, + outer_name: &str, + ret_ty: &'ll Type, + fn_args: &[&'ll Value], attrs: AutoDiffAttrs, + dest: PlaceRef<'tcx, &'ll Value>, ) { // We have to pick the name depending on whether we want forward or reverse mode autodiff. let mut ad_name: String = match attrs.mode { @@ -303,11 +263,9 @@ fn generate_enzyme_call<'ll>( } .to_string(); - // add outer_fn name to ad_name to make it unique, in case users apply autodiff to multiple + // add outer_name to ad_name to make it unique, in case users apply autodiff to multiple // functions. Unwrap will only panic, if LLVM gave us an invalid string. - let name = llvm::get_value_name(outer_fn); - let outer_fn_name = std::str::from_utf8(name).unwrap(); - ad_name.push_str(outer_fn_name); + ad_name.push_str(outer_name); // Let us assume the user wrote the following function square: // @@ -317,14 +275,8 @@ fn generate_enzyme_call<'ll>( // %0 = fmul double %x, %x // ret double %0 // } - // ``` - // - // The user now applies autodiff to the function square, in which case fn_to_diff will be `square`. - // Our macro generates the following placeholder code (slightly simplified): // - // ```llvm // define double @dsquare(double %x) { - // ; placeholder code // return 0.0; // } // ``` @@ -341,176 +293,44 @@ fn generate_enzyme_call<'ll>( // ret double %0 // } // ``` - unsafe { - let enzyme_ty = compute_enzyme_fn_ty(cx, &attrs, fn_to_diff, outer_fn); - - // FIXME(ZuseZ4): the CC/Addr/Vis values are best effort guesses, we should look at tests and - // think a bit more about what should go here. - let cc = llvm::LLVMGetFunctionCallConv(outer_fn); - let ad_fn = declare_simple_fn( - cx, - &ad_name, - llvm::CallConv::try_from(cc).expect("invalid callconv"), - llvm::UnnamedAddr::No, - llvm::Visibility::Default, - enzyme_ty, - ); - - // Otherwise LLVM might inline our temporary code before the enzyme pass has a chance to - // do it's work. 
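As a sanity reference for the `square`/`dsquare` walkthrough in the comments above, here is the same derivative in plain Rust (an illustrative oracle only, not part of the patch):

```rust
fn square(x: f64) -> f64 {
    x * x
}

/// What forward-mode AD of `square` computes when the tangent of `x` is
/// seeded with 1.0: d/dx (x * x) = 2 * x.
fn dsquare_reference(x: f64) -> f64 {
    2.0 * x
}
```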
- let attr = llvm::AttributeKind::NoInline.create_attr(cx.llcx); - attributes::apply_to_llfn(ad_fn, Function, &[attr]); - - // We add a made-up attribute just such that we can recognize it after AD to update - // (no)-inline attributes. We'll then also remove this attribute. - let enzyme_marker_attr = llvm::CreateAttrString(cx.llcx, "enzyme_marker"); - attributes::apply_to_llfn(outer_fn, Function, &[enzyme_marker_attr]); - - // first, remove all calls from fnc - let entry = llvm::LLVMGetFirstBasicBlock(outer_fn); - let br = llvm::LLVMRustGetTerminator(entry); - llvm::LLVMRustEraseInstFromParent(br); - - let last_inst = llvm::LLVMRustGetLastInstruction(entry).unwrap(); - let mut builder = SBuilder::build(cx, entry); - - let num_args = llvm::LLVMCountParams(&fn_to_diff); - let mut args = Vec::with_capacity(num_args as usize + 1); - args.push(fn_to_diff); - - let enzyme_primal_ret = cx.create_metadata("enzyme_primal_return".to_string()).unwrap(); - if matches!(attrs.ret_activity, DiffActivity::Dual | DiffActivity::Active) { - args.push(cx.get_metadata_value(enzyme_primal_ret)); - } - if attrs.width > 1 { - let enzyme_width = cx.create_metadata("enzyme_width".to_string()).unwrap(); - args.push(cx.get_metadata_value(enzyme_width)); - args.push(cx.get_const_int(cx.type_i64(), attrs.width as u64)); - } - - let has_sret = has_sret(outer_fn); - let outer_args: Vec<&llvm::Value> = get_params(outer_fn); - match_args_from_caller_to_enzyme( - &cx, - &builder, - attrs.width, - &mut args, - &attrs.input_activity, - &outer_args, - has_sret, - ); - - let call = builder.call(enzyme_ty, ad_fn, &args, None); - - // This part is a bit iffy. LLVM requires that a call to an inlineable function has some - // metadata attached to it, but we just created this code oota. Given that the - // differentiated function already has partly confusing metadata, and given that this - // affects nothing but the auttodiff IR, we take a shortcut and just steal metadata from the - // dummy code which we inserted at a higher level. - // FIXME(ZuseZ4): Work with Enzyme core devs to clarify what debug metadata issues we have, - // and how to best improve it for enzyme core and rust-enzyme. - let md_ty = cx.get_md_kind_id("dbg"); - if llvm::LLVMRustHasMetadata(last_inst, md_ty) { - let md = llvm::LLVMRustDIGetInstMetadata(last_inst) - .expect("failed to get instruction metadata"); - let md_todiff = cx.get_metadata_value(md); - llvm::LLVMSetMetadata(call, md_ty, md_todiff); - } else { - // We don't panic, since depending on whether we are in debug or release mode, we might - // have no debug info to copy, which would then be ok. - trace!("no dbg info"); - } - - // Now that we copied the metadata, get rid of dummy code. - llvm::LLVMRustEraseInstUntilInclusive(entry, last_inst); - - if cx.val_ty(call) == cx.type_void() || has_sret { - if has_sret { - // This is what we already have in our outer_fn (shortened): - // define void @_foo(ptr <..> sret([32 x i8]) initializes((0, 32)) %0, <...>) { - // %7 = call [4 x double] (...) @__enzyme_fwddiff_foo(ptr @square, metadata !"enzyme_width", i64 4, <...>) - // <Here we are, we want to add the following two lines> - // store [4 x double] %7, ptr %0, align 8 - // ret void - // } - - // now store the result of the enzyme call into the sret pointer. 
- let sret_ptr = outer_args[0]; - let call_ty = cx.val_ty(call); - if attrs.width == 1 { - assert_eq!(cx.type_kind(call_ty), TypeKind::Struct); - } else { - assert_eq!(cx.type_kind(call_ty), TypeKind::Array); - } - llvm::LLVMBuildStore(&builder.llbuilder, call, sret_ptr); - } - builder.ret_void(); - } else { - builder.ret(call); - } - - // Let's crash in case that we messed something up above and generated invalid IR. - llvm::LLVMRustVerifyFunction( - outer_fn, - llvm::LLVMRustVerifierFailureAction::LLVMAbortProcessAction, - ); - } -} - -pub(crate) fn differentiate<'ll>( - module: &'ll ModuleCodegen<ModuleLlvm>, - cgcx: &CodegenContext<LlvmCodegenBackend>, - diff_items: Vec<AutoDiffItem>, - _config: &ModuleConfig, -) -> Result<(), FatalError> { - for item in &diff_items { - trace!("{}", item); + let enzyme_ty = unsafe { llvm::LLVMFunctionType(ret_ty, ptr::null(), 0, TRUE) }; + + // FIXME(ZuseZ4): the CC/Addr/Vis values are best effort guesses, we should look at tests and + // think a bit more about what should go here. + let cc = unsafe { llvm::LLVMGetFunctionCallConv(fn_to_diff) }; + let ad_fn = declare_simple_fn( + cx, + &ad_name, + llvm::CallConv::try_from(cc).expect("invalid callconv"), + llvm::UnnamedAddr::No, + llvm::Visibility::Default, + enzyme_ty, + ); + + let num_args = llvm::LLVMCountParams(&fn_to_diff); + let mut args = Vec::with_capacity(num_args as usize + 1); + args.push(fn_to_diff); + + let enzyme_primal_ret = cx.create_metadata(b"enzyme_primal_return"); + if matches!(attrs.ret_activity, DiffActivity::Dual | DiffActivity::Active) { + args.push(cx.get_metadata_value(enzyme_primal_ret)); } - - let diag_handler = cgcx.create_dcx(); - - let cx = SimpleCx::new(module.module_llvm.llmod(), module.module_llvm.llcx, cgcx.pointer_size); - - // First of all, did the user try to use autodiff without using the -Zautodiff=Enable flag? - if !diff_items.is_empty() - && !cgcx.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::Enable) - { - return Err(diag_handler.handle().emit_almost_fatal(AutoDiffWithoutEnable)); - } - - // Here we replace the placeholder code with the actual autodiff code, which calls Enzyme. 
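A note on the `LLVMFunctionType(ret_ty, ptr::null(), 0, TRUE)` call above: zero fixed parameters plus the vararg flag declares the `__enzyme_*` symbol as variadic, so the (metadata marker, value) pairs assembled by `match_args_from_caller_to_enzyme` never have to be spelled out in the declaration. A minimal sketch of that declaration step under the same assumption:

```rust
use std::ptr;

/// Declare the `__enzyme_fwddiff_*`/`__enzyme_autodiff_*` symbol with only a
/// return type; the vararg flag (`TRUE`) lets each call site carry its own
/// mix of metadata markers and values.
fn declare_enzyme_fn<'ll>(
    cx: &SimpleCx<'ll>,
    name: &str,
    ret_ty: &'ll Type,
) -> &'ll Value {
    let enzyme_ty = unsafe { llvm::LLVMFunctionType(ret_ty, ptr::null(), 0, TRUE) };
    declare_simple_fn(
        cx,
        name,
        llvm::CallConv::CCallConv,
        llvm::UnnamedAddr::No,
        llvm::Visibility::Default,
        enzyme_ty,
    )
}
```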
- for item in diff_items.iter() { - let name = item.source.clone(); - let fn_def: Option<&llvm::Value> = cx.get_function(&name); - let Some(fn_def) = fn_def else { - return Err(llvm_err( - diag_handler.handle(), - LlvmError::PrepareAutoDiff { - src: item.source.clone(), - target: item.target.clone(), - error: "could not find source function".to_owned(), - }, - )); - }; - debug!(?item.target); - let fn_target: Option<&llvm::Value> = cx.get_function(&item.target); - let Some(fn_target) = fn_target else { - return Err(llvm_err( - diag_handler.handle(), - LlvmError::PrepareAutoDiff { - src: item.source.clone(), - target: item.target.clone(), - error: "could not find target function".to_owned(), - }, - )); - }; - - generate_enzyme_call(&cx, fn_def, fn_target, item.attrs.clone()); + if attrs.width > 1 { + let enzyme_width = cx.create_metadata(b"enzyme_width"); + args.push(cx.get_metadata_value(enzyme_width)); + args.push(cx.get_const_int(cx.type_i64(), attrs.width as u64)); } - // FIXME(ZuseZ4): support SanitizeHWAddress and prevent illegal/unsupported opts + match_args_from_caller_to_enzyme( + &cx, + builder, + attrs.width, + &mut args, + &attrs.input_activity, + fn_args, + ); - trace!("done with differentiate()"); + let call = builder.call(enzyme_ty, None, None, ad_fn, &args, None, None); - Ok(()) + builder.store_to_place(call, dest.val); } diff --git a/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs b/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs new file mode 100644 index 00000000000..1280ab1442a --- /dev/null +++ b/compiler/rustc_codegen_llvm/src/builder/gpu_offload.rs @@ -0,0 +1,439 @@ +use std::ffi::CString; + +use llvm::Linkage::*; +use rustc_abi::Align; +use rustc_codegen_ssa::back::write::CodegenContext; +use rustc_codegen_ssa::traits::BaseTypeCodegenMethods; + +use crate::builder::SBuilder; +use crate::common::AsCCharPtr; +use crate::llvm::AttributePlace::Function; +use crate::llvm::{self, Linkage, Type, Value}; +use crate::{LlvmCodegenBackend, SimpleCx, attributes}; + +pub(crate) fn handle_gpu_code<'ll>( + _cgcx: &CodegenContext<LlvmCodegenBackend>, + cx: &'ll SimpleCx<'_>, +) { + // The offload memory transfer type for each kernel + let mut o_types = vec![]; + let mut kernels = vec![]; + let offload_entry_ty = add_tgt_offload_entry(&cx); + for num in 0..9 { + let kernel = cx.get_function(&format!("kernel_{num}")); + if let Some(kernel) = kernel { + o_types.push(gen_define_handling(&cx, kernel, offload_entry_ty, num)); + kernels.push(kernel); + } + } + + gen_call_handling(&cx, &kernels, &o_types); +} + +// What is our @1 here? 
A magic global, used in our data_{begin/update/end}_mapper: +// @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1 +// @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8 +fn generate_at_one<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Value { + // @0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1 + let unknown_txt = ";unknown;unknown;0;0;;"; + let c_entry_name = CString::new(unknown_txt).unwrap(); + let c_val = c_entry_name.as_bytes_with_nul(); + let initializer = crate::common::bytes_in_context(cx.llcx, c_val); + let at_zero = add_unnamed_global(&cx, &"", initializer, PrivateLinkage); + llvm::set_alignment(at_zero, Align::ONE); + + // @1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 22, ptr @0 }, align 8 + let struct_ident_ty = cx.type_named_struct("struct.ident_t"); + let struct_elems = vec![ + cx.get_const_i32(0), + cx.get_const_i32(2), + cx.get_const_i32(0), + cx.get_const_i32(22), + at_zero, + ]; + let struct_elems_ty: Vec<_> = struct_elems.iter().map(|&x| cx.val_ty(x)).collect(); + let initializer = crate::common::named_struct(struct_ident_ty, &struct_elems); + cx.set_struct_body(struct_ident_ty, &struct_elems_ty, false); + let at_one = add_unnamed_global(&cx, &"", initializer, PrivateLinkage); + llvm::set_alignment(at_one, Align::EIGHT); + at_one +} + +pub(crate) fn add_tgt_offload_entry<'ll>(cx: &'ll SimpleCx<'_>) -> &'ll llvm::Type { + let offload_entry_ty = cx.type_named_struct("struct.__tgt_offload_entry"); + let tptr = cx.type_ptr(); + let ti64 = cx.type_i64(); + let ti32 = cx.type_i32(); + let ti16 = cx.type_i16(); + // For each kernel to run on the gpu, we will later generate one entry of this type. + // copied from LLVM + // typedef struct { + // uint64_t Reserved; + // uint16_t Version; + // uint16_t Kind; + // uint32_t Flags; Flags associated with the entry (see Target Region Entry Flags) + // void *Address; Address of global symbol within device image (function or global) + // char *SymbolName; + // uint64_t Size; Size of the entry info (0 if it is a function) + // uint64_t Data; + // void *AuxAddr; + // } __tgt_offload_entry; + let entry_elements = vec![ti64, ti16, ti16, ti32, tptr, tptr, ti64, ti64, tptr]; + cx.set_struct_body(offload_entry_ty, &entry_elements, false); + offload_entry_ty +} + +fn gen_tgt_kernel_global<'ll>(cx: &'ll SimpleCx<'_>) { + let kernel_arguments_ty = cx.type_named_struct("struct.__tgt_kernel_arguments"); + let tptr = cx.type_ptr(); + let ti64 = cx.type_i64(); + let ti32 = cx.type_i32(); + let tarr = cx.type_array(ti32, 3); + + // Taken from the LLVM APITypes.h declaration: + //struct KernelArgsTy { + // uint32_t Version = 0; // Version of this struct for ABI compatibility. + // uint32_t NumArgs = 0; // Number of arguments in each input pointer. + // void **ArgBasePtrs = + // nullptr; // Base pointer of each argument (e.g. a struct). + // void **ArgPtrs = nullptr; // Pointer to the argument data. + // int64_t *ArgSizes = nullptr; // Size of the argument data in bytes. + // int64_t *ArgTypes = nullptr; // Type of the data (e.g. to / from). + // void **ArgNames = nullptr; // Name of the data for debugging, possibly null. + // void **ArgMappers = nullptr; // User-defined mappers, possibly null. + // uint64_t Tripcount = + // 0; // Tripcount for the teams / distribute loop, 0 otherwise. + // struct { + // uint64_t NoWait : 1; // Was this kernel spawned with a `nowait` clause. 
+ // uint64_t IsCUDA : 1; // Was this kernel spawned via CUDA. + // uint64_t Unused : 62; + // } Flags = {0, 0, 0}; + // // The number of teams (for x,y,z dimension). + // uint32_t NumTeams[3] = {0, 0, 0}; + // // The number of threads (for x,y,z dimension). + // uint32_t ThreadLimit[3] = {0, 0, 0}; + // uint32_t DynCGroupMem = 0; // Amount of dynamic cgroup memory requested. + //}; + let kernel_elements = + vec![ti32, ti32, tptr, tptr, tptr, tptr, tptr, tptr, ti64, ti64, tarr, tarr, ti32]; + + cx.set_struct_body(kernel_arguments_ty, &kernel_elements, false); + // For now we don't handle kernels, so for now we just add a global dummy + // to make sure that the __tgt_offload_entry is defined and handled correctly. + cx.declare_global("my_struct_global2", kernel_arguments_ty); +} + +fn gen_tgt_data_mappers<'ll>( + cx: &'ll SimpleCx<'_>, +) -> (&'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Value, &'ll llvm::Type) { + let tptr = cx.type_ptr(); + let ti64 = cx.type_i64(); + let ti32 = cx.type_i32(); + + let args = vec![tptr, ti64, ti32, tptr, tptr, tptr, tptr, tptr, tptr]; + let mapper_fn_ty = cx.type_func(&args, cx.type_void()); + let mapper_begin = "__tgt_target_data_begin_mapper"; + let mapper_update = "__tgt_target_data_update_mapper"; + let mapper_end = "__tgt_target_data_end_mapper"; + let begin_mapper_decl = declare_offload_fn(&cx, mapper_begin, mapper_fn_ty); + let update_mapper_decl = declare_offload_fn(&cx, mapper_update, mapper_fn_ty); + let end_mapper_decl = declare_offload_fn(&cx, mapper_end, mapper_fn_ty); + + let nounwind = llvm::AttributeKind::NoUnwind.create_attr(cx.llcx); + attributes::apply_to_llfn(begin_mapper_decl, Function, &[nounwind]); + attributes::apply_to_llfn(update_mapper_decl, Function, &[nounwind]); + attributes::apply_to_llfn(end_mapper_decl, Function, &[nounwind]); + + (begin_mapper_decl, update_mapper_decl, end_mapper_decl, mapper_fn_ty) +} + +fn add_priv_unnamed_arr<'ll>(cx: &SimpleCx<'ll>, name: &str, vals: &[u64]) -> &'ll llvm::Value { + let ti64 = cx.type_i64(); + let mut size_val = Vec::with_capacity(vals.len()); + for &val in vals { + size_val.push(cx.get_const_i64(val)); + } + let initializer = cx.const_array(ti64, &size_val); + add_unnamed_global(cx, name, initializer, PrivateLinkage) +} + +pub(crate) fn add_unnamed_global<'ll>( + cx: &SimpleCx<'ll>, + name: &str, + initializer: &'ll llvm::Value, + l: Linkage, +) -> &'ll llvm::Value { + let llglobal = add_global(cx, name, initializer, l); + llvm::LLVMSetUnnamedAddress(llglobal, llvm::UnnamedAddr::Global); + llglobal +} + +pub(crate) fn add_global<'ll>( + cx: &SimpleCx<'ll>, + name: &str, + initializer: &'ll llvm::Value, + l: Linkage, +) -> &'ll llvm::Value { + let c_name = CString::new(name).unwrap(); + let llglobal: &'ll llvm::Value = llvm::add_global(cx.llmod, cx.val_ty(initializer), &c_name); + llvm::set_global_constant(llglobal, true); + llvm::set_linkage(llglobal, l); + llvm::set_initializer(llglobal, initializer); + llglobal +} + +fn gen_define_handling<'ll>( + cx: &'ll SimpleCx<'_>, + kernel: &'ll llvm::Value, + offload_entry_ty: &'ll llvm::Type, + num: i64, +) -> &'ll llvm::Value { + let types = cx.func_params_types(cx.get_type_of_global(kernel)); + // It seems like non-pointer values are automatically mapped. So here, we focus on pointer (or + // reference) types. + let num_ptr_types = types + .iter() + .map(|&x| matches!(cx.type_kind(x), rustc_codegen_ssa::common::TypeKind::Pointer)) + .count(); + + // We do not know their size anymore at this level, so hardcode a placeholder. 
+ // A follow-up pr will track these from the frontend, where we still have Rust types. + // Then, we will be able to figure out that e.g. `&[f32;256]` will result in 4*256 bytes. + // I decided that 1024 bytes is a great placeholder value for now. + add_priv_unnamed_arr(&cx, &format!(".offload_sizes.{num}"), &vec![1024; num_ptr_types]); + // Here we figure out whether something needs to be copied to the gpu (=1), from the gpu (=2), + // or both to and from the gpu (=3). Other values shouldn't affect us for now. + // A non-mutable reference or pointer will be 1, an array that's not read, but fully overwritten + // will be 2. For now, everything is 3, until we have our frontend set up. + let o_types = + add_priv_unnamed_arr(&cx, &format!(".offload_maptypes.{num}"), &vec![3; num_ptr_types]); + // Next: For each function, generate these three entries. A weak constant, + // the llvm.rodata entry name, and the omp_offloading_entries value + + let name = format!(".kernel_{num}.region_id"); + let initializer = cx.get_const_i8(0); + let region_id = add_unnamed_global(&cx, &name, initializer, WeakAnyLinkage); + + let c_entry_name = CString::new(format!("kernel_{num}")).unwrap(); + let c_val = c_entry_name.as_bytes_with_nul(); + let offload_entry_name = format!(".offloading.entry_name.{num}"); + + let initializer = crate::common::bytes_in_context(cx.llcx, c_val); + let llglobal = add_unnamed_global(&cx, &offload_entry_name, initializer, InternalLinkage); + llvm::set_alignment(llglobal, Align::ONE); + llvm::set_section(llglobal, c".llvm.rodata.offloading"); + + // Not actively used yet, for calling real kernels + let name = format!(".offloading.entry.kernel_{num}"); + + // See the __tgt_offload_entry documentation above. + let reserved = cx.get_const_i64(0); + let version = cx.get_const_i16(1); + let kind = cx.get_const_i16(1); + let flags = cx.get_const_i32(0); + let size = cx.get_const_i64(0); + let data = cx.get_const_i64(0); + let aux_addr = cx.const_null(cx.type_ptr()); + let elems = vec![reserved, version, kind, flags, region_id, llglobal, size, data, aux_addr]; + + let initializer = crate::common::named_struct(offload_entry_ty, &elems); + let c_name = CString::new(name).unwrap(); + let llglobal = llvm::add_global(cx.llmod, offload_entry_ty, &c_name); + llvm::set_global_constant(llglobal, true); + llvm::set_linkage(llglobal, WeakAnyLinkage); + llvm::set_initializer(llglobal, initializer); + llvm::set_alignment(llglobal, Align::ONE); + let c_section_name = CString::new(".omp_offloading_entries").unwrap(); + llvm::set_section(llglobal, &c_section_name); + o_types +} + +fn declare_offload_fn<'ll>( + cx: &'ll SimpleCx<'_>, + name: &str, + ty: &'ll llvm::Type, +) -> &'ll llvm::Value { + crate::declare::declare_simple_fn( + cx, + name, + llvm::CallConv::CCallConv, + llvm::UnnamedAddr::No, + llvm::Visibility::Default, + ty, + ) +} + +// For each kernel *call*, we now use some of our previous declared globals to move data to and from +// the gpu. We don't have a proper frontend yet, so we assume that every call to a kernel function +// from main is intended to run on the GPU. For now, we only handle the data transfer part of it. +// If two consecutive kernels use the same memory, we still move it to the host and back to the gpu. +// Since in our frontend users (by default) don't have to specify data transfer, this is something +// we should optimize in the future! 
We also assume that everything should be copied back and forth,
+// but sometimes we can directly zero-allocate on the device and only move back, or if something is
+// immutable, we might only copy it to the device, but not back.
+//
+// Current steps:
+// 0. Alloca some variables for the following steps
+// 1. Set the insert point before the kernel call.
+// 2. Generate all the GEPs and stores, to be used in 3)
+// 3. Generate __tgt_target_data_begin calls to move data to the GPU
+//
+// unchanged: keep kernel call. Later move the kernel to the GPU
+//
+// 4. Set the insert point after the kernel call.
+// 5. Generate all the GEPs and stores, to be used in 6)
+// 6. Generate __tgt_target_data_end calls to move data from the GPU
+fn gen_call_handling<'ll>(
+    cx: &'ll SimpleCx<'_>,
+    _kernels: &[&'ll llvm::Value],
+    o_types: &[&'ll llvm::Value],
+) {
+    // %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
+    let tptr = cx.type_ptr();
+    let ti32 = cx.type_i32();
+    let tgt_bin_desc_ty = vec![ti32, tptr, tptr, tptr];
+    let tgt_bin_desc = cx.type_named_struct("struct.__tgt_bin_desc");
+    cx.set_struct_body(tgt_bin_desc, &tgt_bin_desc_ty, false);
+
+    gen_tgt_kernel_global(&cx);
+    let (begin_mapper_decl, _, end_mapper_decl, fn_ty) = gen_tgt_data_mappers(&cx);
+
+    let main_fn = cx.get_function("main");
+    let Some(main_fn) = main_fn else { return };
+    let kernel_name = "kernel_1";
+    let call = unsafe {
+        llvm::LLVMRustGetFunctionCall(main_fn, kernel_name.as_c_char_ptr(), kernel_name.len())
+    };
+    let Some(kernel_call) = call else {
+        return;
+    };
+    let kernel_call_bb = unsafe { llvm::LLVMGetInstructionParent(kernel_call) };
+    let called = unsafe { llvm::LLVMGetCalledValue(kernel_call).unwrap() };
+    let mut builder = SBuilder::build(cx, kernel_call_bb);
+
+    let types = cx.func_params_types(cx.get_type_of_global(called));
+    let num_args = types.len() as u64;
+
+    // Step 0)
+    // %struct.__tgt_bin_desc = type { i32, ptr, ptr, ptr }
+    // %6 = alloca %struct.__tgt_bin_desc, align 8
+    unsafe { llvm::LLVMRustPositionBuilderPastAllocas(builder.llbuilder, main_fn) };
+
+    let tgt_bin_desc_alloca = builder.direct_alloca(tgt_bin_desc, Align::EIGHT, "EmptyDesc");
+
+    let ty = cx.type_array(cx.type_ptr(), num_args);
+    // Base pointers are just the input pointers to the kernel, stored in a local alloca.
+    let a1 = builder.direct_alloca(ty, Align::EIGHT, ".offload_baseptrs");
+    // Ptrs are the result of a GEP into the base pointer, at least for our trivial types.
+    let a2 = builder.direct_alloca(ty, Align::EIGHT, ".offload_ptrs");
+    // These represent the sizes in bytes, e.g. the entry for `&[f64; 16]` will be 8*16.
+    let ty2 = cx.type_array(cx.type_i64(), num_args);
+    let a4 = builder.direct_alloca(ty2, Align::EIGHT, ".offload_sizes");
+    // Now we allocate once per function param, a copy to be passed to one of our maps.
+    let mut vals = vec![];
+    let mut geps = vec![];
+    let i32_0 = cx.get_const_i32(0);
+    for (index, in_ty) in types.iter().enumerate() {
+        // Get the function arg, store it into the alloca, and read it back.
+ let p = llvm::get_param(called, index as u32); + let name = llvm::get_value_name(p); + let name = str::from_utf8(&name).unwrap(); + let arg_name = format!("{name}.addr"); + let alloca = builder.direct_alloca(in_ty, Align::EIGHT, &arg_name); + + builder.store(p, alloca, Align::EIGHT); + let val = builder.load(in_ty, alloca, Align::EIGHT); + let gep = builder.inbounds_gep(cx.type_f32(), val, &[i32_0]); + vals.push(val); + geps.push(gep); + } + + // Step 1) + unsafe { llvm::LLVMRustPositionBefore(builder.llbuilder, kernel_call) }; + builder.memset(tgt_bin_desc_alloca, cx.get_const_i8(0), cx.get_const_i64(32), Align::EIGHT); + + let mapper_fn_ty = cx.type_func(&[cx.type_ptr()], cx.type_void()); + let register_lib_decl = declare_offload_fn(&cx, "__tgt_register_lib", mapper_fn_ty); + let unregister_lib_decl = declare_offload_fn(&cx, "__tgt_unregister_lib", mapper_fn_ty); + let init_ty = cx.type_func(&[], cx.type_void()); + let init_rtls_decl = declare_offload_fn(cx, "__tgt_init_all_rtls", init_ty); + + // call void @__tgt_register_lib(ptr noundef %6) + builder.call(mapper_fn_ty, register_lib_decl, &[tgt_bin_desc_alloca], None); + // call void @__tgt_init_all_rtls() + builder.call(init_ty, init_rtls_decl, &[], None); + + for i in 0..num_args { + let idx = cx.get_const_i32(i); + let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, idx]); + builder.store(vals[i as usize], gep1, Align::EIGHT); + let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, idx]); + builder.store(geps[i as usize], gep2, Align::EIGHT); + let gep3 = builder.inbounds_gep(ty2, a4, &[i32_0, idx]); + // As mentioned above, we don't use Rust type information yet. So for now we will just + // assume that we have 1024 bytes, 256 f32 values. + // FIXME(offload): write an offload frontend and handle arbitrary types. + builder.store(cx.get_const_i64(1024), gep3, Align::EIGHT); + } + + // For now we have a very simplistic indexing scheme into our + // offload_{baseptrs,ptrs,sizes}. We will probably improve this along with our gpu frontend pr. + fn get_geps<'a, 'll>( + builder: &mut SBuilder<'a, 'll>, + cx: &'ll SimpleCx<'ll>, + ty: &'ll Type, + ty2: &'ll Type, + a1: &'ll Value, + a2: &'ll Value, + a4: &'ll Value, + ) -> (&'ll Value, &'ll Value, &'ll Value) { + let i32_0 = cx.get_const_i32(0); + + let gep1 = builder.inbounds_gep(ty, a1, &[i32_0, i32_0]); + let gep2 = builder.inbounds_gep(ty, a2, &[i32_0, i32_0]); + let gep3 = builder.inbounds_gep(ty2, a4, &[i32_0, i32_0]); + (gep1, gep2, gep3) + } + + fn generate_mapper_call<'a, 'll>( + builder: &mut SBuilder<'a, 'll>, + cx: &'ll SimpleCx<'ll>, + geps: (&'ll Value, &'ll Value, &'ll Value), + o_type: &'ll Value, + fn_to_call: &'ll Value, + fn_ty: &'ll Type, + num_args: u64, + s_ident_t: &'ll Value, + ) { + let nullptr = cx.const_null(cx.type_ptr()); + let i64_max = cx.get_const_i64(u64::MAX); + let num_args = cx.get_const_i32(num_args); + let args = + vec![s_ident_t, i64_max, num_args, geps.0, geps.1, geps.2, o_type, nullptr, nullptr]; + builder.call(fn_ty, fn_to_call, &args, None); + } + + // Step 2) + let s_ident_t = generate_at_one(&cx); + let o = o_types[0]; + let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4); + generate_mapper_call(&mut builder, &cx, geps, o, begin_mapper_decl, fn_ty, num_args, s_ident_t); + + // Step 3) + // Here we will add code for the actual kernel launches in a follow-up PR. 
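For orientation, the mapper calls emitted in the steps above target the following libomptarget entry points. The signatures are read off `mapper_fn_ty` (ptr, i64, i32, then six pointers), and `device_id` is the `-1` materialized via `u64::MAX`; the declarations below are an assumed host-side sketch, for illustration only:

```rust
use std::ffi::c_void;

// The generated IR invokes these runtime entry points in order:
// register -> init -> data_begin -> (kernel, still on host) -> data_end -> unregister.
unsafe extern "C" {
    fn __tgt_register_lib(desc: *mut c_void);
    fn __tgt_init_all_rtls();
    fn __tgt_target_data_begin_mapper(
        ident: *mut c_void,
        device_id: i64,
        num_args: i32,
        base_ptrs: *mut *mut c_void,
        ptrs: *mut *mut c_void,
        sizes: *mut i64,
        map_types: *mut i64,
        names: *mut *mut c_void,
        mappers: *mut *mut c_void,
    );
    // `__tgt_target_data_update_mapper` and `__tgt_target_data_end_mapper`
    // share this signature; `__tgt_unregister_lib` matches `__tgt_register_lib`.
}
```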
+ // FIXME(offload): launch kernels + + // Step 4) + unsafe { llvm::LLVMRustPositionAfter(builder.llbuilder, kernel_call) }; + + let geps = get_geps(&mut builder, &cx, ty, ty2, a1, a2, a4); + generate_mapper_call(&mut builder, &cx, geps, o, end_mapper_decl, fn_ty, num_args, s_ident_t); + + builder.call(mapper_fn_ty, unregister_lib_decl, &[tgt_bin_desc_alloca], None); + + // With this we generated the following begin and end mappers. We could easily generate the + // update mapper in an update. + // call void @__tgt_target_data_begin_mapper(ptr @1, i64 -1, i32 3, ptr %27, ptr %28, ptr %29, ptr @.offload_maptypes, ptr null, ptr null) + // call void @__tgt_target_data_update_mapper(ptr @1, i64 -1, i32 2, ptr %46, ptr %47, ptr %48, ptr @.offload_maptypes.1, ptr null, ptr null) + // call void @__tgt_target_data_end_mapper(ptr @1, i64 -1, i32 3, ptr %49, ptr %50, ptr %51, ptr @.offload_maptypes, ptr null, ptr null) +} diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index 6d68eca60af..791a71d73ae 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -102,8 +102,8 @@ pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'t let is_hidden = if is_generic { // This is a monomorphization of a generic function. if !(cx.tcx.sess.opts.share_generics() - || tcx.codegen_fn_attrs(instance_def_id).inline - == rustc_attr_data_structures::InlineAttr::Never) + || tcx.codegen_instance_attrs(instance.def).inline + == rustc_hir::attrs::InlineAttr::Never) { // When not sharing generics, all instances are in the same // crate and have hidden visibility. diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 92f38565eef..11b79a7fe68 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -3,9 +3,8 @@ use std::borrow::Borrow; use libc::{c_char, c_uint}; -use rustc_abi as abi; -use rustc_abi::HasDataLayout; use rustc_abi::Primitive::Pointer; +use rustc_abi::{self as abi, HasDataLayout as _}; use rustc_ast::Mutability; use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::traits::*; @@ -21,7 +20,7 @@ use tracing::debug; use crate::consts::const_alloc_to_llvm; pub(crate) use crate::context::CodegenCx; use crate::context::{GenericCx, SCx}; -use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, True}; +use crate::llvm::{self, BasicBlock, ConstantInt, FALSE, Metadata, TRUE, ToLlvmBool}; use crate::type_::Type; use crate::value::Value; @@ -119,6 +118,10 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { r } } + + pub(crate) fn const_null(&self, t: &'ll Type) -> &'ll Value { + unsafe { llvm::LLVMConstNull(t) } + } } impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { @@ -155,7 +158,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { self.type_kind(t) == TypeKind::Integer, "only allows integer types in const_int" ); - unsafe { llvm::LLVMConstInt(t, i as u64, True) } + unsafe { llvm::LLVMConstInt(t, i as u64, TRUE) } } fn const_u8(&self, i: u8) -> &'ll Value { @@ -189,7 +192,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { self.type_kind(t) == TypeKind::Integer, "only allows integer types in const_uint" ); - unsafe { llvm::LLVMConstInt(t, i, False) } + unsafe { llvm::LLVMConstInt(t, i, FALSE) } } fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value { @@ -216,10 +219,10 @@ impl<'ll, 'tcx> ConstCodegenMethods for 
CodegenCx<'ll, 'tcx> { bug!("symbol `{}` is already defined", sym); }); llvm::set_initializer(g, sc); - unsafe { - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global); - } + + llvm::set_global_constant(g, true); + llvm::set_unnamed_address(g, llvm::UnnamedAddr::Global); + llvm::set_linkage(g, llvm::Linkage::InternalLinkage); // Cast to default address space if globals are in a different addrspace let g = self.const_pointercast(g, self.type_ptr()); @@ -284,7 +287,8 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { self.const_bitcast(llval, llty) }; } else { - let init = const_alloc_to_llvm(self, alloc, /*static*/ false); + let init = + const_alloc_to_llvm(self, alloc.inner(), /*static*/ false); let alloc = alloc.inner(); let value = match alloc.mutability { Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None), @@ -316,15 +320,19 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { }), ))) .unwrap_memory(); - let init = const_alloc_to_llvm(self, alloc, /*static*/ false); - let value = self.static_addr_of_impl(init, alloc.inner().align, None); - value + let init = const_alloc_to_llvm(self, alloc.inner(), /*static*/ false); + self.static_addr_of_impl(init, alloc.inner().align, None) } GlobalAlloc::Static(def_id) => { assert!(self.tcx.is_static(def_id)); assert!(!self.tcx.is_thread_local_static(def_id)); self.get_static(def_id) } + GlobalAlloc::TypeId { .. } => { + // Drop the provenance, the offset contains the bytes of the hash + let llval = self.const_usize(offset.bytes()); + return unsafe { llvm::LLVMConstIntToPtr(llval, llty) }; + } }; let base_addr_space = global_alloc.address_space(self); let llval = unsafe { @@ -346,7 +354,7 @@ impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { } fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value { - const_alloc_to_llvm(self, alloc, /*static*/ false) + const_alloc_to_llvm(self, alloc.inner(), /*static*/ false) } fn const_ptr_byte_offset(&self, base_addr: Self::Value, offset: abi::Size) -> Self::Value { @@ -369,17 +377,22 @@ pub(crate) fn val_ty(v: &Value) -> &Type { pub(crate) fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { unsafe { let ptr = bytes.as_ptr() as *const c_char; - llvm::LLVMConstStringInContext2(llcx, ptr, bytes.len(), True) + llvm::LLVMConstStringInContext2(llcx, ptr, bytes.len(), TRUE) } } +pub(crate) fn named_struct<'ll>(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + let len = c_uint::try_from(elts.len()).expect("LLVMConstStructInContext elements len overflow"); + unsafe { llvm::LLVMConstNamedStruct(ty, elts.as_ptr(), len) } +} + fn struct_in_context<'ll>( llcx: &'ll llvm::Context, elts: &[&'ll Value], packed: bool, ) -> &'ll Value { let len = c_uint::try_from(elts.len()).expect("LLVMConstStructInContext elements len overflow"); - unsafe { llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), len, packed as Bool) } + unsafe { llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), len, packed.to_llvm_bool()) } } #[inline] diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 28f5282c6b0..9ec7b0f80ae 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -4,33 +4,32 @@ use rustc_abi::{Align, HasDataLayout, Primitive, Scalar, Size, WrappingRange}; use rustc_codegen_ssa::common; use rustc_codegen_ssa::traits::*; use rustc_hir::LangItem; +use rustc_hir::attrs::Linkage; use 
rustc_hir::def::DefKind; -use rustc_hir::def_id::DefId; +use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::interpret::{ Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer, Scalar as InterpScalar, read_target_uint, }; -use rustc_middle::mir::mono::{Linkage, MonoItem}; +use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf}; use rustc_middle::ty::{self, Instance}; use rustc_middle::{bug, span_bug}; use tracing::{debug, instrument, trace}; -use crate::common::{AsCCharPtr, CodegenCx}; +use crate::common::CodegenCx; use crate::errors::SymbolAlreadyDefined; -use crate::llvm::{self, True}; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; -use crate::{base, debuginfo}; +use crate::{base, debuginfo, llvm}; pub(crate) fn const_alloc_to_llvm<'ll>( cx: &CodegenCx<'ll, '_>, - alloc: ConstAllocation<'_>, + alloc: &Allocation, is_static: bool, ) -> &'ll Value { - let alloc = alloc.inner(); // We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or // integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be // producing empty LLVM allocations as they're just adding noise to binaries and forcing less @@ -141,7 +140,7 @@ fn codegen_static_initializer<'ll, 'tcx>( def_id: DefId, ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> { let alloc = cx.tcx.eval_static_initializer(def_id)?; - Ok((const_alloc_to_llvm(cx, alloc, /*static*/ true), alloc)) + Ok((const_alloc_to_llvm(cx, alloc.inner(), /*static*/ true), alloc)) } fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) { @@ -193,8 +192,8 @@ fn check_and_apply_linkage<'ll, 'tcx>( // linkage and there are no definitions), then // `extern_with_linkage_foo` will instead be initialized to // zero. 
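The renamed wrapper below additionally embeds the stable crate id, presumably so that two crates wrapping the same extern symbol cannot produce clashing `_rust_extern_with_linkage_*` globals at link time. The resulting shape (hypothetical helper mirroring the `format!` below, assuming `TyCtxt` and `LOCAL_CRATE` in scope):

```rust
/// Produces e.g. "_rust_extern_with_linkage_00f1e2d3c4b5a697_foo".
fn extern_wrapper_name(tcx: TyCtxt<'_>, sym: &str) -> String {
    format!("_rust_extern_with_linkage_{:016x}_{sym}", tcx.stable_crate_id(LOCAL_CRATE))
}
```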
- let mut real_name = "_rust_extern_with_linkage_".to_string(); - real_name.push_str(sym); + let real_name = + format!("_rust_extern_with_linkage_{:016x}_{sym}", cx.tcx.stable_crate_id(LOCAL_CRATE)); let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| { cx.sess().dcx().emit_fatal(SymbolAlreadyDefined { span: cx.tcx.def_span(def_id), @@ -248,7 +247,7 @@ impl<'ll> CodegenCx<'ll, '_> { }; llvm::set_initializer(gv, cv); set_global_alignment(self, gv, align); - llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global); + llvm::set_unnamed_address(gv, llvm::UnnamedAddr::Global); gv } @@ -273,9 +272,8 @@ impl<'ll> CodegenCx<'ll, '_> { return gv; } let gv = self.static_addr_of_mut(cv, align, kind); - unsafe { - llvm::LLVMSetGlobalConstant(gv, True); - } + llvm::set_global_constant(gv, true); + self.const_globals.borrow_mut().insert(cv, gv); gv } @@ -399,149 +397,140 @@ impl<'ll> CodegenCx<'ll, '_> { } fn codegen_static_item(&mut self, def_id: DefId) { - unsafe { - assert!( - llvm::LLVMGetInitializer( - self.instances.borrow().get(&Instance::mono(self.tcx, def_id)).unwrap() - ) - .is_none() - ); - let attrs = self.tcx.codegen_fn_attrs(def_id); + assert!( + llvm::LLVMGetInitializer( + self.instances.borrow().get(&Instance::mono(self.tcx, def_id)).unwrap() + ) + .is_none() + ); + let attrs = self.tcx.codegen_fn_attrs(def_id); - let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else { - // Error has already been reported - return; - }; - let alloc = alloc.inner(); + let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else { + // Error has already been reported + return; + }; + let alloc = alloc.inner(); - let val_llty = self.val_ty(v); + let val_llty = self.val_ty(v); - let g = self.get_static_inner(def_id, val_llty); - let llty = self.get_type_of_global(g); + let g = self.get_static_inner(def_id, val_llty); + let llty = self.get_type_of_global(g); - let g = if val_llty == llty { - g - } else { - // codegen_static_initializer creates the global value just from the - // `Allocation` data by generating one big struct value that is just - // all the bytes and pointers after each other. This will almost never - // match the type that the static was declared with. Unfortunately - // we can't just LLVMConstBitCast our way out of it because that has very - // specific rules on what can be cast. So instead of adding a new way to - // generate static initializers that match the static's type, we picked - // the easier option and retroactively change the type of the static item itself. - let name = llvm::get_value_name(g).to_vec(); - llvm::set_value_name(g, b""); - - let linkage = llvm::get_linkage(g); - let visibility = llvm::get_visibility(g); - - let new_g = llvm::LLVMRustGetOrInsertGlobal( - self.llmod, - name.as_c_char_ptr(), - name.len(), - val_llty, - ); - - llvm::set_linkage(new_g, linkage); - llvm::set_visibility(new_g, visibility); - - // The old global has had its name removed but is returned by - // get_static since it is in the instance cache. Provide an - // alternative lookup that points to the new global so that - // global_asm! can compute the correct mangled symbol name - // for the global. - self.renamed_statics.borrow_mut().insert(def_id, new_g); - - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::codegen_backend.) 
- self.statics_to_rauw.borrow_mut().push((g, new_g)); - new_g - }; - set_global_alignment(self, g, alloc.align); - llvm::set_initializer(g, v); - - self.assume_dso_local(g, true); - - // Forward the allocation's mutability (picked by the const interner) to LLVM. - if alloc.mutability.is_not() { - llvm::LLVMSetGlobalConstant(g, llvm::True); - } + let g = if val_llty == llty { + g + } else { + // codegen_static_initializer creates the global value just from the + // `Allocation` data by generating one big struct value that is just + // all the bytes and pointers after each other. This will almost never + // match the type that the static was declared with. Unfortunately + // we can't just LLVMConstBitCast our way out of it because that has very + // specific rules on what can be cast. So instead of adding a new way to + // generate static initializers that match the static's type, we picked + // the easier option and retroactively change the type of the static item itself. + let name = String::from_utf8(llvm::get_value_name(g)) + .expect("we declare our statics with a utf8-valid name"); + llvm::set_value_name(g, b""); + + let linkage = llvm::get_linkage(g); + let visibility = llvm::get_visibility(g); + + let new_g = self.declare_global(&name, val_llty); + + llvm::set_linkage(new_g, linkage); + llvm::set_visibility(new_g, visibility); + + // The old global has had its name removed but is returned by + // get_static since it is in the instance cache. Provide an + // alternative lookup that points to the new global so that + // global_asm! can compute the correct mangled symbol name + // for the global. + self.renamed_statics.borrow_mut().insert(def_id, new_g); + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) + self.statics_to_rauw.borrow_mut().push((g, new_g)); + new_g + }; + set_global_alignment(self, g, alloc.align); + llvm::set_initializer(g, v); - debuginfo::build_global_var_di_node(self, def_id, g); + self.assume_dso_local(g, true); - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, self.tls_model); - } + // Forward the allocation's mutability (picked by the const interner) to LLVM. + if alloc.mutability.is_not() { + llvm::set_global_constant(g, true); + } - // Wasm statics with custom link sections get special treatment as they - // go into custom sections of the wasm executable. The exception to this - // is the `.init_array` section which are treated specially by the wasm linker. - if self.tcx.sess.target.is_like_wasm - && attrs - .link_section - .map(|link_section| !link_section.as_str().starts_with(".init_array")) - .unwrap_or(true) - { - if let Some(section) = attrs.link_section { - let section = llvm::LLVMMDStringInContext2( - self.llcx, - section.as_str().as_c_char_ptr(), - section.as_str().len(), - ); - assert!(alloc.provenance().ptrs().is_empty()); - - // The `inspect` method is okay here because we checked for provenance, and - // because we are doing this access to inspect the final interpreter state (not - // as part of the interpreter execution). 
- let bytes = - alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); - let alloc = - llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len()); - let data = [section, alloc]; - let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()); - let val = self.get_metadata_value(meta); + debuginfo::build_global_var_di_node(self, def_id, g); + + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + } + + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. The exception to this + // is the `.init_array` section which are treated specially by the wasm linker. + if self.tcx.sess.target.is_like_wasm + && attrs + .link_section + .map(|link_section| !link_section.as_str().starts_with(".init_array")) + .unwrap_or(true) + { + if let Some(section) = attrs.link_section { + let section = self.create_metadata(section.as_str().as_bytes()); + assert!(alloc.provenance().ptrs().is_empty()); + + // The `inspect` method is okay here because we checked for provenance, and + // because we are doing this access to inspect the final interpreter state (not + // as part of the interpreter execution). + let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); + let alloc = self.create_metadata(bytes); + let data = [section, alloc]; + let meta = + unsafe { llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()) }; + let val = self.get_metadata_value(meta); + unsafe { llvm::LLVMAddNamedMetadataOperand( self.llmod, c"wasm.custom_sections".as_ptr(), val, - ); - } - } else { - base::set_link_section(g, attrs); + ) + }; } + } else { + base::set_link_section(g, attrs); + } - base::set_variable_sanitizer_attrs(g, attrs); - - if attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER) { - // `USED` and `USED_LINKER` can't be used together. - assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)); - - // The semantics of #[used] in Rust only require the symbol to make it into the - // object file. It is explicitly allowed for the linker to strip the symbol if it - // is dead, which means we are allowed to use `llvm.compiler.used` instead of - // `llvm.used` here. - // - // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique - // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs - // in the handling of `.init_array` (the static constructor list) in versions of - // the gold linker (prior to the one released with binutils 2.36). - // - // That said, we only ever emit these when `#[used(compiler)]` is explicitly - // requested. This is to avoid similar breakage on other targets, in particular - // MachO targets have *their* static constructor lists broken if `llvm.compiler.used` - // is emitted rather than `llvm.used`. However, that check happens when assigning - // the `CodegenFnAttrFlags` in the `codegen_fn_attrs` query, so we don't need to - // take care of it here. - self.add_compiler_used_global(g); - } - if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) { - // `USED` and `USED_LINKER` can't be used together. - assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER)); + base::set_variable_sanitizer_attrs(g, attrs); + + if attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER) { + // `USED` and `USED_LINKER` can't be used together. 
+ assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER)); + + // The semantics of #[used] in Rust only require the symbol to make it into the + // object file. It is explicitly allowed for the linker to strip the symbol if it + // is dead, which means we are allowed to use `llvm.compiler.used` instead of + // `llvm.used` here. + // + // Additionally, https://reviews.llvm.org/D97448 in LLVM 13 started emitting unique + // sections with SHF_GNU_RETAIN flag for llvm.used symbols, which may trigger bugs + // in the handling of `.init_array` (the static constructor list) in versions of + // the gold linker (prior to the one released with binutils 2.36). + // + // That said, we only ever emit these when `#[used(compiler)]` is explicitly + // requested. This is to avoid similar breakage on other targets, in particular + // MachO targets have *their* static constructor lists broken if `llvm.compiler.used` + // is emitted rather than `llvm.used`. However, that check happens when assigning + // the `CodegenFnAttrFlags` in the `codegen_fn_attrs` query, so we don't need to + // take care of it here. + self.add_compiler_used_global(g); + } + if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) { + // `USED` and `USED_LINKER` can't be used together. + assert!(!attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER)); - self.add_used_global(g); - } + self.add_used_global(g); } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 90582e23b04..4fd6110ac4a 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -8,7 +8,6 @@ use std::str; use rustc_abi::{HasDataLayout, Size, TargetDataLayout, VariantIdx}; use rustc_codegen_ssa::back::versioned_llvm_target; use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh}; -use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::errors as ssa_errors; use rustc_codegen_ssa::traits::*; use rustc_data_structures::base_n::{ALPHANUMERIC_ONLY, ToBaseN}; @@ -34,7 +33,6 @@ use smallvec::SmallVec; use crate::back::write::to_llvm_code_model; use crate::callee::get_fn; -use crate::common::AsCCharPtr; use crate::debuginfo::metadata::apply_vcall_visibility_metadata; use crate::llvm::Metadata; use crate::type_::Type; @@ -169,6 +167,8 @@ pub(crate) unsafe fn create_module<'ll>( let mod_name = SmallCStr::new(mod_name); let llmod = unsafe { llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx) }; + let cx = SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size()); + let mut target_data_layout = sess.target.data_layout.to_string(); let llvm_version = llvm_util::get_version(); @@ -206,11 +206,22 @@ pub(crate) unsafe fn create_module<'ll>( // LLVM 21 updated the default layout on nvptx: https://github.com/llvm/llvm-project/pull/124961 target_data_layout = target_data_layout.replace("e-p6:32:32-i64", "e-i64"); } + if sess.target.arch == "amdgpu" { + // LLVM 21 adds the address width for address space 8. + // See https://github.com/llvm/llvm-project/pull/139419 + target_data_layout = target_data_layout.replace("p8:128:128:128:48", "p8:128:128") + } + } + if llvm_version < (22, 0, 0) { + if sess.target.arch == "avr" { + // LLVM 22.0 updated the default layout on avr: https://github.com/llvm/llvm-project/pull/153010 + target_data_layout = target_data_layout.replace("n8:16", "n8") + } } // Ensure the data-layout values hardcoded remain the defaults. 
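Each arch guard above rewrites the hardcoded layout back to the spelling an older LLVM reports: rustc's target specs carry the newest LLVM's default, and the check that follows compares that string against what the target machine actually produces, so the two must match byte-for-byte. The idiom in isolation, reusing the avr case:

    let llvm_version = llvm_util::get_version(); // (major, minor, patch)
    if llvm_version < (22, 0, 0) && sess.target.arch == "avr" {
        // Pre-22 LLVMs still use the old `n8` form of this component.
        target_data_layout = target_data_layout.replace("n8:16", "n8");
    }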
{ - let tm = crate::back::write::create_informational_target_machine(tcx.sess, false); + let tm = crate::back::write::create_informational_target_machine(sess, false); unsafe { llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm.raw()); } @@ -366,6 +377,15 @@ pub(crate) unsafe fn create_module<'ll>( } } + if let Some(regparm_count) = sess.opts.unstable_opts.regparm { + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Error, + "NumRegisterParameters", + regparm_count, + ); + } + if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection { if sess.target.arch == "aarch64" { llvm::add_module_flag_u32( @@ -451,6 +471,15 @@ pub(crate) unsafe fn create_module<'ll>( } } + if sess.opts.unstable_opts.indirect_branch_cs_prefix { + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Override, + "indirect_branch_cs_prefix", + 1, + ); + } + match (sess.opts.unstable_opts.small_data_threshold, sess.target.small_data_threshold_support()) { // Set up the small-data optimization limit for architectures that use @@ -473,18 +502,14 @@ pub(crate) unsafe fn create_module<'ll>( #[allow(clippy::option_env_unwrap)] let rustc_producer = format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION")); - let name_metadata = unsafe { - llvm::LLVMMDStringInContext2( - llcx, - rustc_producer.as_c_char_ptr(), - rustc_producer.as_bytes().len(), - ) - }; + + let name_metadata = cx.create_metadata(rustc_producer.as_bytes()); + unsafe { llvm::LLVMAddNamedMetadataOperand( llmod, c"llvm.ident".as_ptr(), - &llvm::LLVMMetadataAsValue(llcx, llvm::LLVMMDNodeInContext2(llcx, &name_metadata, 1)), + &cx.get_metadata_value(llvm::LLVMMDNodeInContext2(llcx, &name_metadata, 1)), ); } @@ -652,10 +677,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { } } impl<'ll> SimpleCx<'ll> { - pub(crate) fn get_return_type(&self, ty: &'ll Type) -> &'ll Type { - assert_eq!(self.type_kind(ty), TypeKind::Function); - unsafe { llvm::LLVMGetReturnType(ty) } - } pub(crate) fn get_type_of_global(&self, val: &'ll Value) -> &'ll Type { unsafe { llvm::LLVMGlobalGetValueType(val) } } @@ -680,7 +701,23 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { } pub(crate) fn get_const_int(&self, ty: &'ll Type, val: u64) -> &'ll Value { - unsafe { llvm::LLVMConstInt(ty, val, llvm::False) } + unsafe { llvm::LLVMConstInt(ty, val, llvm::FALSE) } + } + + pub(crate) fn get_const_i64(&self, n: u64) -> &'ll Value { + self.get_const_int(self.type_i64(), n) + } + + pub(crate) fn get_const_i32(&self, n: u64) -> &'ll Value { + self.get_const_int(self.type_i32(), n) + } + + pub(crate) fn get_const_i16(&self, n: u64) -> &'ll Value { + self.get_const_int(self.type_i16(), n) + } + + pub(crate) fn get_const_i8(&self, n: u64) -> &'ll Value { + self.get_const_int(self.type_i8(), n) } pub(crate) fn get_function(&self, name: &str) -> Option<&'ll Value> { @@ -698,20 +735,10 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { } } - pub(crate) fn create_metadata(&self, name: String) -> Option<&'ll Metadata> { - Some(unsafe { + pub(crate) fn create_metadata(&self, name: &[u8]) -> &'ll Metadata { + unsafe { llvm::LLVMMDStringInContext2(self.llcx(), name.as_ptr() as *const c_char, name.len()) - }) - } - - pub(crate) fn get_functions(&self) -> Vec<&'ll Value> { - let mut functions = vec![]; - let mut func = unsafe { llvm::LLVMGetFirstFunction(self.llmod()) }; - while let Some(f) = func { - functions.push(f); - func = unsafe { llvm::LLVMGetNextFunction(f) } } - functions } } diff --git 
a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs index f6000e72840..a4b60d420f3 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs @@ -73,48 +73,6 @@ pub(crate) struct CounterExpression { pub(crate) rhs: Counter, } -pub(crate) mod mcdc { - use rustc_middle::mir::coverage::{ConditionId, ConditionInfo, DecisionInfo}; - - /// Must match the layout of `LLVMRustMCDCDecisionParameters`. - #[repr(C)] - #[derive(Clone, Copy, Debug, Default)] - pub(crate) struct DecisionParameters { - bitmap_idx: u32, - num_conditions: u16, - } - - type LLVMConditionId = i16; - - /// Must match the layout of `LLVMRustMCDCBranchParameters`. - #[repr(C)] - #[derive(Clone, Copy, Debug, Default)] - pub(crate) struct BranchParameters { - condition_id: LLVMConditionId, - condition_ids: [LLVMConditionId; 2], - } - - impl From<ConditionInfo> for BranchParameters { - fn from(value: ConditionInfo) -> Self { - let to_llvm_cond_id = |cond_id: Option<ConditionId>| { - cond_id.and_then(|id| LLVMConditionId::try_from(id.as_usize()).ok()).unwrap_or(-1) - }; - let ConditionInfo { condition_id, true_next_id, false_next_id } = value; - Self { - condition_id: to_llvm_cond_id(Some(condition_id)), - condition_ids: [to_llvm_cond_id(false_next_id), to_llvm_cond_id(true_next_id)], - } - } - } - - impl From<DecisionInfo> for DecisionParameters { - fn from(info: DecisionInfo) -> Self { - let DecisionInfo { bitmap_idx, num_conditions } = info; - Self { bitmap_idx, num_conditions } - } - } -} - /// A span of source code coordinates to be embedded in coverage metadata. /// /// Must match the layout of `LLVMRustCoverageSpan`. @@ -148,26 +106,14 @@ pub(crate) struct Regions { pub(crate) code_regions: Vec<CodeRegion>, pub(crate) expansion_regions: Vec<ExpansionRegion>, pub(crate) branch_regions: Vec<BranchRegion>, - pub(crate) mcdc_branch_regions: Vec<MCDCBranchRegion>, - pub(crate) mcdc_decision_regions: Vec<MCDCDecisionRegion>, } impl Regions { /// Returns true if none of this structure's tables contain any regions. pub(crate) fn has_no_regions(&self) -> bool { - let Self { - code_regions, - expansion_regions, - branch_regions, - mcdc_branch_regions, - mcdc_decision_regions, - } = self; - - code_regions.is_empty() - && expansion_regions.is_empty() - && branch_regions.is_empty() - && mcdc_branch_regions.is_empty() - && mcdc_decision_regions.is_empty() + let Self { code_regions, expansion_regions, branch_regions } = self; + + code_regions.is_empty() && expansion_regions.is_empty() && branch_regions.is_empty() } } @@ -195,21 +141,3 @@ pub(crate) struct BranchRegion { pub(crate) true_counter: Counter, pub(crate) false_counter: Counter, } - -/// Must match the layout of `LLVMRustCoverageMCDCBranchRegion`. -#[derive(Clone, Debug)] -#[repr(C)] -pub(crate) struct MCDCBranchRegion { - pub(crate) cov_span: CoverageSpan, - pub(crate) true_counter: Counter, - pub(crate) false_counter: Counter, - pub(crate) mcdc_branch_params: mcdc::BranchParameters, -} - -/// Must match the layout of `LLVMRustCoverageMCDCDecisionRegion`. 
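The slimmed-down `has_no_regions` above keeps the exhaustive-destructuring guard: the pattern names every field and deliberately omits `..`, so adding or removing a region table is a compile error at the check rather than a silently stale result. The same guard on a toy struct:

    struct Tables { a: Vec<u32>, b: Vec<u32> }
    fn is_empty(t: &Tables) -> bool {
        // No `..` in the pattern: a new field breaks this line until the
        // emptiness check accounts for it.
        let Tables { a, b } = t;
        a.is_empty() && b.is_empty()
    }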
-#[derive(Clone, Debug)] -#[repr(C)] -pub(crate) struct MCDCDecisionRegion { - pub(crate) cov_span: CoverageSpan, - pub(crate) mcdc_decision_params: mcdc::DecisionParameters, -} diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs index 907d6d41a1f..bc4f6bb6a82 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs @@ -63,13 +63,7 @@ pub(crate) fn write_function_mappings_to_buffer( expressions: &[ffi::CounterExpression], regions: &ffi::Regions, ) -> Vec<u8> { - let ffi::Regions { - code_regions, - expansion_regions, - branch_regions, - mcdc_branch_regions, - mcdc_decision_regions, - } = regions; + let ffi::Regions { code_regions, expansion_regions, branch_regions } = regions; // SAFETY: // - All types are FFI-compatible and have matching representations in Rust/C++. @@ -87,10 +81,6 @@ pub(crate) fn write_function_mappings_to_buffer( expansion_regions.len(), branch_regions.as_ptr(), branch_regions.len(), - mcdc_branch_regions.as_ptr(), - mcdc_branch_regions.len(), - mcdc_decision_regions.as_ptr(), - mcdc_decision_regions.len(), buffer, ) }) diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index a9be833a643..d1cb95507d9 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -1,3 +1,4 @@ +use std::assert_matches::assert_matches; use std::sync::Arc; use itertools::Itertools; @@ -5,6 +6,7 @@ use rustc_abi::Align; use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, ConstCodegenMethods}; use rustc_data_structures::fx::FxIndexMap; use rustc_index::IndexVec; +use rustc_macros::TryFromU32; use rustc_middle::ty::TyCtxt; use rustc_session::RemapFileNameExt; use rustc_session::config::RemapPathScopeComponents; @@ -20,6 +22,23 @@ mod covfun; mod spans; mod unused; +/// Version number that will be included in the `__llvm_covmap` section header. +/// Corresponds to LLVM's `llvm::coverage::CovMapVersion` (in `CoverageMapping.h`), +/// or at least the subset that we know and care about. +/// +/// Note that version `n` is encoded as `(n-1)`. +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, TryFromU32)] +enum CovmapVersion { + /// Used by LLVM 18 onwards. + Version7 = 6, +} + +impl CovmapVersion { + fn to_u32(self) -> u32 { + self as u32 + } +} + /// Generates and exports the coverage map, which is embedded in special /// linker sections in the final binary. /// @@ -29,38 +48,28 @@ pub(crate) fn finalize(cx: &mut CodegenCx<'_, '_>) { let tcx = cx.tcx; // Ensure that LLVM is using a version of the coverage mapping format that - // agrees with our Rust-side code. Expected versions (encoded as n-1) are: - // - `CovMapVersion::Version7` (6) used by LLVM 18-19 - let covmap_version = { - let llvm_covmap_version = llvm_cov::mapping_version(); - let expected_versions = 6..=6; - assert!( - expected_versions.contains(&llvm_covmap_version), - "Coverage mapping version exposed by `llvm-wrapper` is out of sync; \ - expected {expected_versions:?} but was {llvm_covmap_version}" - ); - // This is the version number that we will embed in the covmap section: - llvm_covmap_version - }; + // agrees with our Rust-side code. Expected versions are: + // - `Version7` (6) used by LLVM 18 onwards.
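The conversion used in the next statement comes from `#[derive(TryFromU32)]` (from `rustc_macros`) on the enum above. A hedged sketch of roughly what the derive expands to here — the useful property is that the error carries the raw value, which the caller formats into its panic message:

    impl TryFrom<u32> for CovmapVersion {
        type Error = u32;
        fn try_from(raw: u32) -> Result<Self, u32> {
            match raw {
                6 => Ok(CovmapVersion::Version7),
                _ => Err(raw),
            }
        }
    }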
+ let covmap_version = + CovmapVersion::try_from(llvm_cov::mapping_version()).unwrap_or_else(|raw_version: u32| { + panic!("unknown coverage mapping version reported by `llvm-wrapper`: {raw_version}") + }); + assert_matches!(covmap_version, CovmapVersion::Version7); debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name()); // FIXME(#132395): Can this be none even when coverage is enabled? - let instances_used = match cx.coverage_cx { - Some(ref cx) => cx.instances_used.borrow(), - None => return, - }; - - let mut covfun_records = instances_used - .iter() - .copied() + let Some(ref coverage_cx) = cx.coverage_cx else { return }; + + let mut covfun_records = coverage_cx + .instances_used() + .into_iter() // Sort by symbol name, so that the global file table is built in an // order that doesn't depend on the stable-hash-based order in which // instances were visited during codegen. .sorted_by_cached_key(|&instance| tcx.symbol_name(instance).name) .filter_map(|instance| prepare_covfun_record(tcx, instance, true)) .collect::<Vec<_>>(); - drop(instances_used); // In a single designated CGU, also prepare covfun records for functions // in this crate that were instrumented for coverage, but are unused. @@ -205,7 +214,11 @@ impl VirtualFileMapping { /// Generates the contents of the covmap record for this CGU, which mostly /// consists of a header and a list of filenames. The record is then stored /// as a global variable in the `__llvm_covmap` section. -fn generate_covmap_record<'ll>(cx: &mut CodegenCx<'ll, '_>, version: u32, filenames_buffer: &[u8]) { +fn generate_covmap_record<'ll>( + cx: &mut CodegenCx<'ll, '_>, + version: CovmapVersion, + filenames_buffer: &[u8], +) { // A covmap record consists of four target-endian u32 values, followed by // the encoded filenames table. Two of the header fields are unused in // modern versions of the LLVM coverage mapping format, and are always 0. @@ -216,7 +229,7 @@ fn generate_covmap_record<'ll>(cx: &mut CodegenCx<'ll, '_>, version: u32, filena cx.const_u32(0), // (unused) cx.const_u32(filenames_buffer.len() as u32), cx.const_u32(0), // (unused) - cx.const_u32(version), + cx.const_u32(version.to_u32()), ], /* packed */ false, ); diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs index b704cf2b1cd..e0da8d36876 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs @@ -27,6 +27,9 @@ use crate::llvm; /// the final record that will be embedded in the `__llvm_covfun` section. #[derive(Debug)] pub(crate) struct CovfunRecord<'tcx> { + /// Not used directly, but helpful in debug messages. + _instance: Instance<'tcx>, + mangled_function_name: &'tcx str, source_hash: u64, is_used: bool, @@ -55,6 +58,7 @@ pub(crate) fn prepare_covfun_record<'tcx>( let expressions = prepare_expressions(ids_info); let mut covfun = CovfunRecord { + _instance: instance, mangled_function_name: tcx.symbol_name(instance).name, source_hash: if is_used { fn_cov_info.function_source_hash } else { 0 }, is_used, @@ -102,11 +106,21 @@ fn fill_region_tables<'tcx>( ids_info: &'tcx CoverageIdsInfo, covfun: &mut CovfunRecord<'tcx>, ) { + // If this function is unused, replace all counters with zero. 
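The closure that follows was hoisted out of the per-mapping loop (its previous location shows up as removed lines further down in this hunk); it captures only loop-invariant state, so building it once is enough. Behaviourally, an unused function keeps every region's span but degenerates each counter to zero, so `llvm-cov` still reports the function, with zero counts. The core of it, restated:

    let term = if covfun.is_used {
        ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term")
    } else {
        CovTerm::Zero // unused function: all counters read as zero
    };
    ffi::Counter::from_term(term)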
+ let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter { + let term = if covfun.is_used { + ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term") + } else { + CovTerm::Zero + }; + ffi::Counter::from_term(term) + }; + // Currently a function's mappings must all be in the same file, so use the // first mapping's span to determine the file. let source_map = tcx.sess.source_map(); let Some(first_span) = (try { fn_cov_info.mappings.first()?.span }) else { - debug_assert!(false, "function has no mappings: {:?}", covfun.mangled_function_name); + debug_assert!(false, "function has no mappings: {covfun:?}"); return; }; let source_file = source_map.lookup_source_file(first_span.lo()); @@ -117,7 +131,7 @@ fn fill_region_tables<'tcx>( // codegen needs to handle that gracefully to avoid #133606. // It's hard for tests to trigger this organically, so instead we set // `-Zcoverage-options=discard-all-spans-in-codegen` to force it to occur. - let discard_all = tcx.sess.coverage_discard_all_spans_in_codegen(); + let discard_all = tcx.sess.coverage_options().discard_all_spans_in_codegen; let make_coords = |span: Span| { if discard_all { None } else { spans::make_coords(source_map, &source_file, span) } }; @@ -126,23 +140,11 @@ fn fill_region_tables<'tcx>( code_regions, expansion_regions: _, // FIXME(Zalathar): Fill out support for expansion regions branch_regions, - mcdc_branch_regions, - mcdc_decision_regions, } = &mut covfun.regions; // For each counter/region pair in this function+file, convert it to a // form suitable for FFI. for &Mapping { ref kind, span } in &fn_cov_info.mappings { - // If this function is unused, replace all counters with zero. - let counter_for_bcb = |bcb: BasicCoverageBlock| -> ffi::Counter { - let term = if covfun.is_used { - ids_info.term_for_bcb[bcb].expect("every BCB in a mapping was given a term") - } else { - CovTerm::Zero - }; - ffi::Counter::from_term(term) - }; - let Some(coords) = make_coords(span) else { continue }; let cov_span = coords.make_coverage_span(local_file_id); @@ -157,20 +159,6 @@ fn fill_region_tables<'tcx>( false_counter: counter_for_bcb(false_bcb), }); } - MappingKind::MCDCBranch { true_bcb, false_bcb, mcdc_params } => { - mcdc_branch_regions.push(ffi::MCDCBranchRegion { - cov_span, - true_counter: counter_for_bcb(true_bcb), - false_counter: counter_for_bcb(false_bcb), - mcdc_branch_params: ffi::mcdc::BranchParameters::from(mcdc_params), - }); - } - MappingKind::MCDCDecision(mcdc_decision_params) => { - mcdc_decision_regions.push(ffi::MCDCDecisionRegion { - cov_span, - mcdc_decision_params: ffi::mcdc::DecisionParameters::from(mcdc_decision_params), - }); - } } } } @@ -184,6 +172,7 @@ pub(crate) fn generate_covfun_record<'tcx>( covfun: &CovfunRecord<'tcx>, ) { let &CovfunRecord { + _instance, mangled_function_name, source_hash, is_used, diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs index 39a59560c9d..574463be7ff 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs @@ -39,7 +39,10 @@ impl Coords { /// or other expansions), and if it does happen then skipping a span or function is /// better than an ICE or `llvm-cov` failure that the user might have no way to avoid. 
pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span) -> Option<Coords> { - let span = ensure_non_empty_span(source_map, span)?; + if span.is_empty() { + debug_assert!(false, "can't make coords from empty span: {span:?}"); + return None; + } let lo = span.lo(); let hi = span.hi(); @@ -70,29 +73,6 @@ pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span) }) } -fn ensure_non_empty_span(source_map: &SourceMap, span: Span) -> Option<Span> { - if !span.is_empty() { - return Some(span); - } - - // The span is empty, so try to enlarge it to cover an adjacent '{' or '}'. - source_map - .span_to_source(span, |src, start, end| try { - // Adjusting span endpoints by `BytePos(1)` is normally a bug, - // but in this case we have specifically checked that the character - // we're skipping over is one of two specific ASCII characters, so - // adjusting by exactly 1 byte is correct. - if src.as_bytes().get(end).copied() == Some(b'{') { - Some(span.with_hi(span.hi() + BytePos(1))) - } else if start > 0 && src.as_bytes()[start - 1] == b'}' { - Some(span.with_lo(span.lo() - BytePos(1))) - } else { - None - } - }) - .ok()? -} - /// If `llvm-cov` sees a source region that is improperly ordered (end < start), /// it will immediately exit with a fatal error. To prevent that from happening, /// discard regions that are improperly ordered, or might be interpreted in a diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index eefbd7cf6c4..6a58f495c9d 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -1,11 +1,10 @@ use std::cell::{OnceCell, RefCell}; use std::ffi::{CStr, CString}; -use rustc_abi::Size; use rustc_codegen_ssa::traits::{ - BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods, + ConstCodegenMethods, CoverageInfoBuilderMethods, MiscCodegenMethods, }; -use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; +use rustc_data_structures::fx::FxIndexMap; use rustc_middle::mir::coverage::CoverageKind; use rustc_middle::ty::Instance; use tracing::{debug, instrument}; @@ -20,38 +19,29 @@ mod mapgen; /// Extra per-CGU context/state needed for coverage instrumentation. pub(crate) struct CguCoverageContext<'ll, 'tcx> { - /// Coverage data for each instrumented function identified by DefId. - pub(crate) instances_used: RefCell<FxIndexSet<Instance<'tcx>>>, - pub(crate) pgo_func_name_var_map: RefCell<FxHashMap<Instance<'tcx>, &'ll llvm::Value>>, - pub(crate) mcdc_condition_bitmap_map: RefCell<FxHashMap<Instance<'tcx>, Vec<&'ll llvm::Value>>>, + /// Associates function instances with an LLVM global that holds the + /// function's symbol name, as needed by LLVM coverage intrinsics. + /// + /// Instances in this map are also considered "used" for the purposes of + /// emitting covfun records. Every covfun record holds a hash of its + /// symbol name, and `llvm-cov` will exit fatally if it can't resolve that + /// hash back to an entry in the binary's `__llvm_prf_names` linker section. 
+ pub(crate) pgo_func_name_var_map: RefCell<FxIndexMap<Instance<'tcx>, &'ll llvm::Value>>, covfun_section_name: OnceCell<CString>, } impl<'ll, 'tcx> CguCoverageContext<'ll, 'tcx> { pub(crate) fn new() -> Self { - Self { - instances_used: RefCell::<FxIndexSet<_>>::default(), - pgo_func_name_var_map: Default::default(), - mcdc_condition_bitmap_map: Default::default(), - covfun_section_name: Default::default(), - } + Self { pgo_func_name_var_map: Default::default(), covfun_section_name: Default::default() } } - /// LLVM use a temp value to record evaluated mcdc test vector of each decision, which is - /// called condition bitmap. In order to handle nested decisions, several condition bitmaps can - /// be allocated for a function body. These values are named `mcdc.addr.{i}` and are a 32-bit - /// integers. They respectively hold the condition bitmaps for decisions with a depth of `i`. - fn try_get_mcdc_condition_bitmap( - &self, - instance: &Instance<'tcx>, - decision_depth: u16, - ) -> Option<&'ll llvm::Value> { - self.mcdc_condition_bitmap_map - .borrow() - .get(instance) - .and_then(|bitmap_map| bitmap_map.get(decision_depth as usize)) - .copied() // Dereference Option<&&Value> to Option<&Value> + /// Returns the list of instances considered "used" in this CGU, as + /// inferred from the keys of `pgo_func_name_var_map`. + pub(crate) fn instances_used(&self) -> Vec<Instance<'tcx>> { + // Collecting into a Vec is way easier than trying to juggle RefCell + // projections, and this should only run once per CGU anyway. + self.pgo_func_name_var_map.borrow().keys().copied().collect::<Vec<_>>() } } @@ -78,7 +68,10 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// string, to hold the function name passed to LLVM intrinsic /// `instrprof.increment()`. The `Value` is only created once per instance. /// Multiple invocations with the same instance return the same `Value`. - fn get_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value { + /// + /// This has the side-effect of causing coverage codegen to consider this + /// function "used", making it eligible to emit an associated covfun record. + fn ensure_pgo_func_name_var(&self, instance: Instance<'tcx>) -> &'ll llvm::Value { debug!("getting pgo_func_name_var for instance={:?}", instance); let mut pgo_func_name_var_map = self.coverage_cx().pgo_func_name_var_map.borrow_mut(); pgo_func_name_var_map.entry(instance).or_insert_with(|| { @@ -90,38 +83,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { } impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { - fn init_coverage(&mut self, instance: Instance<'tcx>) { - let Some(function_coverage_info) = - self.tcx.instance_mir(instance.def).function_coverage_info.as_deref() - else { - return; - }; - - // If there are no MC/DC bitmaps to set up, return immediately. - if function_coverage_info.mcdc_bitmap_bits == 0 { - return; - } - - let fn_name = self.get_pgo_func_name_var(instance); - let hash = self.const_u64(function_coverage_info.function_source_hash); - let bitmap_bits = self.const_u32(function_coverage_info.mcdc_bitmap_bits as u32); - self.mcdc_parameters(fn_name, hash, bitmap_bits); - - // Create pointers named `mcdc.addr.{i}` to stack-allocated condition bitmaps. - let mut cond_bitmaps = vec![]; - for i in 0..function_coverage_info.mcdc_num_condition_bitmaps { - // MC/DC intrinsics will perform loads/stores that use the ABI default - // alignment for i32, so our variable declaration should match. 
- let align = self.tcx.data_layout.i32_align.abi; - let cond_bitmap = self.alloca(Size::from_bytes(4), align); - llvm::set_value_name(cond_bitmap, format!("mcdc.addr.{i}").as_bytes()); - self.store(self.const_i32(0), cond_bitmap, align); - cond_bitmaps.push(cond_bitmap); - } - - self.coverage_cx().mcdc_condition_bitmap_map.borrow_mut().insert(instance, cond_bitmaps); - } - #[instrument(level = "debug", skip(self))] fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) { // Our caller should have already taken care of inlining subtleties, @@ -138,7 +99,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { // When that happens, we currently just discard those statements, so // the corresponding code will be undercounted. // FIXME(Zalathar): Find a better solution for mixed-coverage builds. - let Some(coverage_cx) = &bx.cx.coverage_cx else { return }; + let Some(_coverage_cx) = &bx.cx.coverage_cx else { return }; let Some(function_coverage_info) = bx.tcx.instance_mir(instance.def).function_coverage_info.as_deref() @@ -151,11 +112,6 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { return; }; - // Mark the instance as used in this CGU, for coverage purposes. - // This includes functions that were not partitioned into this CGU, - // but were MIR-inlined into one of this CGU's functions. - coverage_cx.instances_used.borrow_mut().insert(instance); - match *kind { CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. } => unreachable!( "marker statement {kind:?} should have been removed by CleanupPostBorrowck" @@ -163,7 +119,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { CoverageKind::VirtualCounter { bcb } if let Some(&id) = ids_info.phys_counter_for_node.get(&bcb) => { - let fn_name = bx.get_pgo_func_name_var(instance); + let fn_name = bx.ensure_pgo_func_name_var(instance); let hash = bx.const_u64(function_coverage_info.function_source_hash); let num_counters = bx.const_u32(ids_info.num_counters); let index = bx.const_u32(id.as_u32()); @@ -175,30 +131,6 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { } // If a BCB doesn't have an associated physical counter, there's nothing to codegen. CoverageKind::VirtualCounter { .. 
} => {} - CoverageKind::CondBitmapUpdate { index, decision_depth } => { - let cond_bitmap = coverage_cx - .try_get_mcdc_condition_bitmap(&instance, decision_depth) - .expect("mcdc cond bitmap should have been allocated for updating"); - let cond_index = bx.const_i32(index as i32); - bx.mcdc_condbitmap_update(cond_index, cond_bitmap); - } - CoverageKind::TestVectorBitmapUpdate { bitmap_idx, decision_depth } => { - let cond_bitmap = - coverage_cx.try_get_mcdc_condition_bitmap(&instance, decision_depth).expect( - "mcdc cond bitmap should have been allocated for merging \ - into the global bitmap", - ); - assert!( - bitmap_idx as usize <= function_coverage_info.mcdc_bitmap_bits, - "bitmap index of the decision out of range" - ); - - let fn_name = bx.get_pgo_func_name_var(instance); - let hash = bx.const_u64(function_coverage_info.function_source_hash); - let bitmap_index = bx.const_u32(bitmap_idx); - bx.mcdc_tvbitmap_update(fn_name, hash, bitmap_index, cond_bitmap); - bx.mcdc_condbitmap_reset(cond_bitmap); - } } } } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index 8f0948b8183..7a6dc008c7b 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -1,13 +1,11 @@ // .debug_gdb_scripts binary section. -use rustc_ast::attr; use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive; use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::LOCAL_CRATE; use rustc_middle::bug; use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerType; use rustc_session::config::{CrateType, DebugInfo}; -use rustc_span::sym; use crate::builder::Builder; use crate::common::CodegenCx; @@ -74,8 +72,8 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>( .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name)); llvm::set_section(section_var, c".debug_gdb_scripts"); llvm::set_initializer(section_var, cx.const_bytes(section_contents)); - llvm::LLVMSetGlobalConstant(section_var, llvm::True); - llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global); + llvm::LLVMSetGlobalConstant(section_var, llvm::TRUE); + llvm::set_unnamed_address(section_var, llvm::UnnamedAddr::Global); llvm::set_linkage(section_var, llvm::Linkage::LinkOnceODRLinkage); // This should make sure that the whole section is not larger than // the string it contains. Otherwise we get a warning from GDB. 
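The `TRUE`/`set_unnamed_address` changes here belong to a renaming sweep that runs through the whole diff: screaming-case `TRUE`/`FALSE` for the FFI bool constants, and lowercase safe wrappers replacing direct calls to raw setters such as `LLVMSetUnnamedAddress`. The wrapper shape, sketched (the actual definitions live in the `llvm` module, outside these hunks):

    // Each wrapper fences exactly one unsafe FFI call, so call sites like
    // `llvm::set_unnamed_address(gv, UnnamedAddr::Global)` need no
    // `unsafe` block of their own.
    pub(crate) fn set_unnamed_address(global: &Value, unnamed: UnnamedAddr) {
        unsafe { LLVMSetUnnamedAddress(global, unnamed) }
    }

    pub(crate) fn set_global_constant(g: &Value, is_constant: bool) {
        unsafe { LLVMSetGlobalConstant(g, if is_constant { TRUE } else { FALSE }) }
    }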
@@ -86,9 +84,6 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>( } pub(crate) fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { - let omit_gdb_pretty_printer_section = - attr::contains_name(cx.tcx.hir_krate_attrs(), sym::omit_gdb_pretty_printer_section); - // To ensure the section `__rustc_debug_gdb_scripts_section__` will not create // ODR violations at link time, this section will not be emitted for rlibs since // each rlib could produce a different set of visualizers that would be embedded @@ -117,8 +112,7 @@ pub(crate) fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { } }); - !omit_gdb_pretty_printer_section - && cx.sess().opts.debuginfo != DebugInfo::None + cx.sess().opts.debuginfo != DebugInfo::None && cx.sess().target.emit_debug_gdb_scripts && embed_visualizers } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 9b4736e50e6..0e9dbfba658 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -5,7 +5,7 @@ use std::path::{Path, PathBuf}; use std::sync::Arc; use std::{iter, ptr}; -use libc::{c_char, c_longlong, c_uint}; +use libc::{c_longlong, c_uint}; use rustc_abi::{Align, Size}; use rustc_codegen_ssa::debuginfo::type_names::{VTableNameKind, cpp_like_debuginfo}; use rustc_codegen_ssa::traits::*; @@ -1582,13 +1582,9 @@ pub(crate) fn apply_vcall_visibility_metadata<'ll, 'tcx>( }; let trait_ref_typeid = typeid_for_trait_ref(cx.tcx, trait_ref); + let typeid = cx.create_metadata(trait_ref_typeid.as_bytes()); unsafe { - let typeid = llvm::LLVMMDStringInContext2( - cx.llcx, - trait_ref_typeid.as_ptr() as *const c_char, - trait_ref_typeid.as_bytes().len(), - ); let v = [llvm::LLVMValueAsMetadata(cx.const_usize(0)), typeid]; llvm::LLVMRustGlobalAddMetadata( vtable, @@ -1630,7 +1626,7 @@ pub(crate) fn create_vtable_di_node<'ll, 'tcx>( // When full debuginfo is enabled, we want to try and prevent vtables from being // merged. Otherwise debuggers will have a hard time mapping from dyn pointer // to concrete type. - llvm::SetUnnamedAddress(vtable, llvm::UnnamedAddr::No); + llvm::set_unnamed_address(vtable, llvm::UnnamedAddr::No); let vtable_name = compute_debuginfo_vtable_name(cx.tcx, ty, poly_trait_ref, VTableNameKind::GlobalVariable); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs index 56fb12d3c22..18a783a348a 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs @@ -276,7 +276,7 @@ pub(super) fn build_type_with_children<'ll, 'tcx>( && let ty::Adt(adt_def, args) = ty.kind() { let def_id = adt_def.did(); - // If any sub type reference the original type definition and the sub type has a type + // If any child type references the original type definition and the child type has a type // parameter that strictly contains the original parameter, the original type is a recursive // type that can expand indefinitely.
Example, // ``` @@ -285,21 +285,43 @@ pub(super) fn build_type_with_children<'ll, 'tcx>( // Item(T), // } // ``` - let is_expanding_recursive = - debug_context(cx).adt_stack.borrow().iter().any(|(parent_def_id, parent_args)| { - if def_id == *parent_def_id { - args.iter().zip(parent_args.iter()).any(|(arg, parent_arg)| { - if let (Some(arg), Some(parent_arg)) = (arg.as_type(), parent_arg.as_type()) - { - arg != parent_arg && arg.contains(parent_arg) - } else { - false - } - }) - } else { - false - } - }); + let is_expanding_recursive = { + let stack = debug_context(cx).adt_stack.borrow(); + stack + .iter() + .enumerate() + .rev() + .skip(1) + .filter(|(_, (ancestor_def_id, _))| def_id == *ancestor_def_id) + .any(|(ancestor_index, (_, ancestor_args))| { + args.iter() + .zip(ancestor_args.iter()) + .filter_map(|(arg, ancestor_arg)| arg.as_type().zip(ancestor_arg.as_type())) + .any(|(arg, ancestor_arg)| + // Strictly contains. + (arg != ancestor_arg && arg.contains(ancestor_arg)) + // Check all types between current and ancestor use the + // ancestor_arg. + // Otherwise, duplicate wrappers in normal recursive type may be + // regarded as expanding. + // ``` + // struct Recursive { + // a: Box<Box<Recursive>>, + // } + // ``` + // It can produce an ADT stack like this, + // - Box<Recursive> + // - Recursive + // - Box<Box<Recursive>> + && stack[ancestor_index + 1..stack.len()].iter().all( + |(_, intermediate_args)| + intermediate_args + .iter() + .filter_map(|arg| arg.as_type()) + .any(|mid_arg| mid_arg.contains(ancestor_arg)) + )) + }) + }; if is_expanding_recursive { // FIXME: indicate that this is an expanding recursive type in stub metadata? return DINodeCreationResult::new(stub_info.metadata, false); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 5ca2505cec4..2c3a84499ac 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -533,31 +533,26 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { // First, let's see if this is a method within an inherent impl. Because // if yes, we want to make the result subroutine DIE a child of the // subroutine's self-type. - if let Some(impl_def_id) = cx.tcx.impl_of_method(instance.def_id()) { - // If the method does *not* belong to a trait, proceed - if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { - let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions( - instance.args, - cx.typing_env(), - cx.tcx.type_of(impl_def_id), - ); - - // Only "class" methods are generally understood by LLVM, - // so avoid methods on other types (e.g., `<*mut T>::null`). - if let ty::Adt(def, ..) = impl_self_ty.kind() - && !def.is_box() - { - // Again, only create type information if full debuginfo is enabled - if cx.sess().opts.debuginfo == DebugInfo::Full && !impl_self_ty.has_param() - { - return (type_di_node(cx, impl_self_ty), true); - } else { - return (namespace::item_namespace(cx, def.did()), false); - } + // For trait method impls we still use the "parallel namespace" + // strategy + if let Some(imp_def_id) = cx.tcx.inherent_impl_of_assoc(instance.def_id()) { + let impl_self_ty = cx.tcx.instantiate_and_normalize_erasing_regions( + instance.args, + cx.typing_env(), + cx.tcx.type_of(imp_def_id), + ); + + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g., `<*mut T>::null`). + if let ty::Adt(def, ..) 
= impl_self_ty.kind() + && !def.is_box() + { + // Again, only create type information if full debuginfo is enabled + if cx.sess().opts.debuginfo == DebugInfo::Full && !impl_self_ty.has_param() { + return (type_di_node(cx, impl_self_ty), true); + } else { + return (namespace::item_namespace(cx, def.did()), false); } - } else { - // For trait method impls we still use the "parallel namespace" - // strategy } } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs index b4d639368b0..1dcf4ff3062 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs @@ -38,7 +38,7 @@ pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'l parent_scope, namespace_name_string.as_ptr(), namespace_name_string.len(), - llvm::False, // ExportSymbols (only relevant for C++ anonymous namespaces) + llvm::FALSE, // ExportSymbols (only relevant for C++ anonymous namespaces) ) }; diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 2419ec1f888..960a895a203 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -49,7 +49,7 @@ pub(crate) fn declare_simple_fn<'ll>( }; llvm::SetFunctionCallConv(llfn, callconv); - llvm::SetUnnamedAddress(llfn, unnamed); + llvm::set_unnamed_address(llfn, unnamed); llvm::set_visibility(llfn, visibility); llfn @@ -176,7 +176,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { { let typeid = cfi::typeid_for_instance(self.tcx, instance, options); if typeids.insert(typeid.clone()) { - self.add_type_metadata(llfn, typeid); + self.add_type_metadata(llfn, typeid.as_bytes()); } } } else { @@ -189,7 +189,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { .map(cfi::TypeIdOptions::from_iter) { let typeid = cfi::typeid_for_fnabi(self.tcx, fn_abi, options); - self.add_type_metadata(llfn, typeid); + self.add_type_metadata(llfn, typeid.as_bytes()); } } } @@ -215,7 +215,9 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { llfn } +} +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { /// Declare a global with an intention to define it. /// /// Use this function when you intend to define a global. This function will @@ -234,13 +236,13 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// /// Use this function when you intend to define a global without a name. pub(crate) fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { - unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) } + unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod(), ty) } } /// Gets declared value by name. 
pub(crate) fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { debug!("get_declared_value(name={:?})", name); - unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_c_char_ptr(), name.len()) } + unsafe { llvm::LLVMRustGetNamedValue(self.llmod(), name.as_c_char_ptr(), name.len()) } } /// Gets defined or externally defined (AvailableExternally linkage) value by diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs index d50ad8a1a9c..627b0c9ff3b 100644 --- a/compiler/rustc_codegen_llvm/src/errors.rs +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -20,11 +20,6 @@ pub(crate) struct SymbolAlreadyDefined<'a> { #[diag(codegen_llvm_sanitizer_memtag_requires_mte)] pub(crate) struct SanitizerMemtagRequiresMte; -#[derive(Diagnostic)] -#[diag(codegen_llvm_dynamic_linking_with_lto)] -#[note] -pub(crate) struct DynamicLinkingWithLTO; - pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>); impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> { @@ -38,29 +33,13 @@ impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> { } #[derive(Diagnostic)] -#[diag(codegen_llvm_autodiff_without_lto)] -pub(crate) struct AutoDiffWithoutLTO; - -#[derive(Diagnostic)] #[diag(codegen_llvm_autodiff_without_enable)] pub(crate) struct AutoDiffWithoutEnable; #[derive(Diagnostic)] -#[diag(codegen_llvm_lto_disallowed)] -pub(crate) struct LtoDisallowed; - -#[derive(Diagnostic)] -#[diag(codegen_llvm_lto_dylib)] -pub(crate) struct LtoDylib; - -#[derive(Diagnostic)] -#[diag(codegen_llvm_lto_proc_macro)] -pub(crate) struct LtoProcMacro; - -#[derive(Diagnostic)] #[diag(codegen_llvm_lto_bitcode_from_rlib)] pub(crate) struct LtoBitcodeFromRlib { - pub llvm_err: String, + pub err: String, } #[derive(Diagnostic)] diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index fcc0d378f06..49d3dedbeab 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -3,24 +3,29 @@ use std::cmp::Ordering; use rustc_abi::{Align, BackendRepr, ExternAbi, Float, HasDataLayout, Primitive, Size}; use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh}; +use rustc_codegen_ssa::codegen_attrs::autodiff_attrs; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; use rustc_codegen_ssa::traits::*; -use rustc_hir as hir; +use rustc_hir::def_id::LOCAL_CRATE; +use rustc_hir::{self as hir}; use rustc_middle::mir::BinOp; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf}; -use rustc_middle::ty::{self, GenericArgsRef, Ty}; +use rustc_middle::ty::{self, GenericArgsRef, Instance, Ty, TyCtxt, TypingEnv}; use rustc_middle::{bug, span_bug}; use rustc_span::{Span, Symbol, sym}; -use rustc_symbol_mangling::mangle_internal_symbol; +use rustc_symbol_mangling::{mangle_internal_symbol, symbol_name_for_instance_in_crate}; +use rustc_target::callconv::PassMode; use rustc_target::spec::PanicStrategy; use tracing::debug; use crate::abi::FnAbiLlvmExt; use crate::builder::Builder; +use crate::builder::autodiff::{adjust_activity_to_abi, generate_enzyme_call}; use crate::context::CodegenCx; +use crate::errors::AutoDiffWithoutEnable; use crate::llvm::{self, Metadata}; use crate::type_::Type; use 
crate::type_of::LayoutLlvmExt; @@ -189,6 +194,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { &[ptr, args[1].immediate()], ) } + sym::autodiff => { + codegen_autodiff(self, tcx, instance, args, result); + return Ok(()); + } sym::is_val_statically_known => { if let OperandValue::Immediate(imm) = args[0].val { self.call_intrinsic( @@ -321,10 +330,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { _ => bug!(), }; let ptr = args[0].immediate(); + let locality = fn_args.const_at(1).to_value().valtree.unwrap_leaf().to_i32(); self.call_intrinsic( "llvm.prefetch", &[self.val_ty(ptr)], - &[ptr, self.const_i32(rw), args[1].immediate(), self.const_i32(cache_type)], + &[ + ptr, + self.const_i32(rw), + self.const_i32(locality), + self.const_i32(cache_type), + ], ) } sym::carrying_mul_add => { @@ -382,26 +397,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { let width = size.bits(); let llty = self.type_ix(width); match name { - sym::ctlz | sym::cttz => { - let y = self.const_bool(false); - let ret = self.call_intrinsic( - format!("llvm.{name}"), - &[llty], - &[args[0].immediate(), y], - ); - - self.intcast(ret, result.layout.llvm_type(self), false) - } - sym::ctlz_nonzero => { - let y = self.const_bool(true); - let ret = - self.call_intrinsic("llvm.ctlz", &[llty], &[args[0].immediate(), y]); - self.intcast(ret, result.layout.llvm_type(self), false) - } - sym::cttz_nonzero => { - let y = self.const_bool(true); + sym::ctlz | sym::ctlz_nonzero | sym::cttz | sym::cttz_nonzero => { + let y = + self.const_bool(name == sym::ctlz_nonzero || name == sym::cttz_nonzero); + let llvm_name = if name == sym::ctlz || name == sym::ctlz_nonzero { + "llvm.ctlz" + } else { + "llvm.cttz" + }; let ret = - self.call_intrinsic("llvm.cttz", &[llty], &[args[0].immediate(), y]); + self.call_intrinsic(llvm_name, &[llty], &[args[0].immediate(), y]); self.intcast(ret, result.layout.llvm_type(self), false) } sym::ctpop => { @@ -1123,6 +1128,143 @@ fn get_rust_try_fn<'a, 'll, 'tcx>( rust_try } +fn codegen_autodiff<'ll, 'tcx>( + bx: &mut Builder<'_, 'll, 'tcx>, + tcx: TyCtxt<'tcx>, + instance: ty::Instance<'tcx>, + args: &[OperandRef<'tcx, &'ll Value>], + result: PlaceRef<'tcx, &'ll Value>, +) { + if !tcx.sess.opts.unstable_opts.autodiff.contains(&rustc_session::config::AutoDiff::Enable) { + let _ = tcx.dcx().emit_almost_fatal(AutoDiffWithoutEnable); + } + + let fn_args = instance.args; + let callee_ty = instance.ty(tcx, bx.typing_env()); + + let sig = callee_ty.fn_sig(tcx).skip_binder(); + + let ret_ty = sig.output(); + let llret_ty = bx.layout_of(ret_ty).llvm_type(bx); + + // Get source, diff, and attrs + let (source_id, source_args) = match fn_args.into_type_list(tcx)[0].kind() { + ty::FnDef(def_id, source_params) => (def_id, source_params), + _ => bug!("invalid autodiff intrinsic args"), + }; + + let fn_source = match Instance::try_resolve(tcx, bx.cx.typing_env(), *source_id, source_args) { + Ok(Some(instance)) => instance, + Ok(None) => bug!( + "could not resolve ({:?}, {:?}) to a specific autodiff instance", + source_id, + source_args + ), + Err(_) => { + // An error has already been emitted + return; + } + }; + + let source_symbol = symbol_name_for_instance_in_crate(tcx, fn_source.clone(), LOCAL_CRATE); + let Some(fn_to_diff) = bx.cx.get_function(&source_symbol) else { + bug!("could not find source function") + }; + + let (diff_id, diff_args) = match fn_args.into_type_list(tcx)[1].kind() { + ty::FnDef(def_id, diff_args) => 
(def_id, diff_args), + _ => bug!("invalid args"), + }; + + let fn_diff = match Instance::try_resolve(tcx, bx.cx.typing_env(), *diff_id, diff_args) { + Ok(Some(instance)) => instance, + Ok(None) => bug!( + "could not resolve ({:?}, {:?}) to a specific autodiff instance", + diff_id, + diff_args + ), + Err(_) => { + // An error has already been emitted + return; + } + }; + + let val_arr = get_args_from_tuple(bx, args[2], fn_diff); + let diff_symbol = symbol_name_for_instance_in_crate(tcx, fn_diff.clone(), LOCAL_CRATE); + + let Some(mut diff_attrs) = autodiff_attrs(tcx, fn_diff.def_id()) else { + bug!("could not find autodiff attrs") + }; + + adjust_activity_to_abi( + tcx, + fn_source.ty(tcx, TypingEnv::fully_monomorphized()), + &mut diff_attrs.input_activity, + ); + + // Build body + generate_enzyme_call( + bx, + bx.cx, + fn_to_diff, + &diff_symbol, + llret_ty, + &val_arr, + diff_attrs.clone(), + result, + ); +} + +fn get_args_from_tuple<'ll, 'tcx>( + bx: &mut Builder<'_, 'll, 'tcx>, + tuple_op: OperandRef<'tcx, &'ll Value>, + fn_instance: Instance<'tcx>, +) -> Vec<&'ll Value> { + let cx = bx.cx; + let fn_abi = cx.fn_abi_of_instance(fn_instance, ty::List::empty()); + + match tuple_op.val { + OperandValue::Immediate(val) => vec![val], + OperandValue::Pair(v1, v2) => vec![v1, v2], + OperandValue::Ref(ptr) => { + let tuple_place = PlaceRef { val: ptr, layout: tuple_op.layout }; + + let mut result = Vec::with_capacity(fn_abi.args.len()); + let mut tuple_index = 0; + + for arg in &fn_abi.args { + match arg.mode { + PassMode::Ignore => {} + PassMode::Direct(_) | PassMode::Cast { .. } => { + let field = tuple_place.project_field(bx, tuple_index); + let llvm_ty = field.layout.llvm_type(bx.cx); + let val = bx.load(llvm_ty, field.val.llval, field.val.align); + result.push(val); + tuple_index += 1; + } + PassMode::Pair(_, _) => { + let field = tuple_place.project_field(bx, tuple_index); + let llvm_ty = field.layout.llvm_type(bx.cx); + let pair_val = bx.load(llvm_ty, field.val.llval, field.val.align); + result.push(bx.extract_value(pair_val, 0)); + result.push(bx.extract_value(pair_val, 1)); + tuple_index += 1; + } + PassMode::Indirect { .. 
} => { + let field = tuple_place.project_field(bx, tuple_index); + result.push(field.val.llval); + tuple_index += 1; + } + } + } + + result + } + + OperandValue::ZeroSized => vec![], + } +} + fn generic_simd_intrinsic<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, name: Symbol, diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 63ca51b006d..628cb34fd9e 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -22,42 +22,35 @@ use std::any::Any; use std::ffi::CStr; use std::mem::ManuallyDrop; +use std::path::PathBuf; use back::owned_target_machine::OwnedTargetMachine; use back::write::{create_informational_target_machine, create_target_machine}; use context::SimpleCx; -use errors::{AutoDiffWithoutLTO, ParseTargetMachineConfig}; +use errors::ParseTargetMachineConfig; use llvm_util::target_config; use rustc_ast::expand::allocator::AllocatorKind; -use rustc_ast::expand::autodiff_attrs::AutoDiffItem; -use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; +use rustc_codegen_ssa::back::lto::{SerializedModule, ThinModule}; use rustc_codegen_ssa::back::write::{ CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn, }; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig}; use rustc_data_structures::fx::FxIndexMap; -use rustc_errors::{DiagCtxtHandle, FatalError}; +use rustc_errors::DiagCtxtHandle; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; use rustc_middle::util::Providers; use rustc_session::Session; -use rustc_session::config::{Lto, OptLevel, OutputFilenames, PrintKind, PrintRequest}; +use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest}; use rustc_span::Symbol; -mod back { - pub(crate) mod archive; - pub(crate) mod lto; - pub(crate) mod owned_target_machine; - mod profiling; - pub(crate) mod write; -} - mod abi; mod allocator; mod asm; mod attributes; +mod back; mod base; mod builder; mod callee; @@ -167,56 +160,56 @@ impl WriteBackendMethods for LlvmCodegenBackend { let stats = llvm::build_string(|s| unsafe { llvm::LLVMRustPrintStatistics(s) }).unwrap(); print!("{stats}"); } - fn run_link( - cgcx: &CodegenContext<Self>, - dcx: DiagCtxtHandle<'_>, - modules: Vec<ModuleCodegen<Self::Module>>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError> { - back::write::link(cgcx, dcx, modules) - } - fn run_fat_lto( + fn run_and_optimize_fat_lto( cgcx: &CodegenContext<Self>, + exported_symbols_for_lto: &[String], + each_linked_rlib_for_lto: &[PathBuf], modules: Vec<FatLtoInput<Self>>, - cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, - ) -> Result<LtoModuleCodegen<Self>, FatalError> { - back::lto::run_fat(cgcx, modules, cached_modules) + ) -> ModuleCodegen<Self::Module> { + let mut module = + back::lto::run_fat(cgcx, exported_symbols_for_lto, each_linked_rlib_for_lto, modules); + + let dcx = cgcx.create_dcx(); + let dcx = dcx.handle(); + back::lto::run_pass_manager(cgcx, dcx, &mut module, false); + + module } fn run_thin_lto( cgcx: &CodegenContext<Self>, + exported_symbols_for_lto: &[String], + each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, Self::ThinBuffer)>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, - ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> { - back::lto::run_thin(cgcx, 
modules, cached_modules) + ) -> (Vec<ThinModule<Self>>, Vec<WorkProduct>) { + back::lto::run_thin( + cgcx, + exported_symbols_for_lto, + each_linked_rlib_for_lto, + modules, + cached_modules, + ) } fn optimize( cgcx: &CodegenContext<Self>, dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<(), FatalError> { + ) { back::write::optimize(cgcx, dcx, module, config) } - fn optimize_fat( - cgcx: &CodegenContext<Self>, - module: &mut ModuleCodegen<Self::Module>, - ) -> Result<(), FatalError> { - let dcx = cgcx.create_dcx(); - let dcx = dcx.handle(); - back::lto::run_pass_manager(cgcx, dcx, module, false) - } fn optimize_thin( cgcx: &CodegenContext<Self>, thin: ThinModule<Self>, - ) -> Result<ModuleCodegen<Self::Module>, FatalError> { + ) -> ModuleCodegen<Self::Module> { back::lto::optimize_thin_module(thin, cgcx) } fn codegen( cgcx: &CodegenContext<Self>, - dcx: DiagCtxtHandle<'_>, module: ModuleCodegen<Self::Module>, config: &ModuleConfig, - ) -> Result<CompiledModule, FatalError> { - back::write::codegen(cgcx, dcx, module, config) + ) -> CompiledModule { + back::write::codegen(cgcx, module, config) } fn prepare_thin( module: ModuleCodegen<Self::Module>, @@ -227,19 +220,6 @@ impl WriteBackendMethods for LlvmCodegenBackend { fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) { (module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod())) } - /// Generate autodiff rules - fn autodiff( - cgcx: &CodegenContext<Self>, - module: &ModuleCodegen<Self::Module>, - diff_fncs: Vec<AutoDiffItem>, - config: &ModuleConfig, - ) -> Result<(), FatalError> { - if cgcx.lto != Lto::Fat { - let dcx = cgcx.create_dcx(); - return Err(dcx.handle().emit_almost_fatal(AutoDiffWithoutLTO)); - } - builder::autodiff::differentiate(module, cgcx, diff_fncs, config) - } } impl LlvmCodegenBackend { @@ -423,24 +403,32 @@ impl ModuleLlvm { } } + fn tm_from_cgcx( + cgcx: &CodegenContext<LlvmCodegenBackend>, + name: &str, + dcx: DiagCtxtHandle<'_>, + ) -> OwnedTargetMachine { + let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name); + match (cgcx.tm_factory)(tm_factory_config) { + Ok(m) => m, + Err(e) => { + dcx.emit_fatal(ParseTargetMachineConfig(e)); + } + } + } + fn parse( cgcx: &CodegenContext<LlvmCodegenBackend>, name: &CStr, buffer: &[u8], dcx: DiagCtxtHandle<'_>, - ) -> Result<Self, FatalError> { + ) -> Self { unsafe { let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx)?; - let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, name.to_str().unwrap()); - let tm = match (cgcx.tm_factory)(tm_factory_config) { - Ok(m) => m, - Err(e) => { - return Err(dcx.emit_almost_fatal(ParseTargetMachineConfig(e))); - } - }; + let llmod_raw = back::lto::parse_module(llcx, name, buffer, dcx); + let tm = ModuleLlvm::tm_from_cgcx(cgcx, name.to_str().unwrap(), dcx); - Ok(ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) }) + ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) } } } diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs deleted file mode 100644 index 51bcc4d123d..00000000000 --- a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs +++ /dev/null @@ -1,94 +0,0 @@ -//! 
A wrapper around LLVM's archive (.a) code - -use std::path::Path; -use std::{slice, str}; - -use rustc_fs_util::path_to_c_string; - -pub(crate) struct ArchiveRO { - pub raw: &'static mut super::Archive, -} - -unsafe impl Send for ArchiveRO {} - -pub(crate) struct Iter<'a> { - raw: &'a mut super::ArchiveIterator<'a>, -} - -pub(crate) struct Child<'a> { - pub raw: &'a mut super::ArchiveChild<'a>, -} - -impl ArchiveRO { - /// Opens a static archive for read-only purposes. This is more optimized - /// than the `open` method because it uses LLVM's internal `Archive` class - /// rather than shelling out to `ar` for everything. - /// - /// If this archive is used with a mutable method, then an error will be - /// raised. - pub(crate) fn open(dst: &Path) -> Result<ArchiveRO, String> { - unsafe { - let s = path_to_c_string(dst); - let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| { - super::last_error().unwrap_or_else(|| "failed to open archive".to_owned()) - })?; - Ok(ArchiveRO { raw: ar }) - } - } - - pub(crate) fn iter(&self) -> Iter<'_> { - unsafe { Iter { raw: super::LLVMRustArchiveIteratorNew(self.raw) } } - } -} - -impl Drop for ArchiveRO { - fn drop(&mut self) { - unsafe { - super::LLVMRustDestroyArchive(&mut *(self.raw as *mut _)); - } - } -} - -impl<'a> Iterator for Iter<'a> { - type Item = Result<Child<'a>, String>; - - fn next(&mut self) -> Option<Result<Child<'a>, String>> { - unsafe { - match super::LLVMRustArchiveIteratorNext(self.raw) { - Some(raw) => Some(Ok(Child { raw })), - None => super::last_error().map(Err), - } - } - } -} - -impl<'a> Drop for Iter<'a> { - fn drop(&mut self) { - unsafe { - super::LLVMRustArchiveIteratorFree(&mut *(self.raw as *mut _)); - } - } -} - -impl<'a> Child<'a> { - pub(crate) fn name(&self) -> Option<&'a str> { - unsafe { - let mut name_len = 0; - let name_ptr = super::LLVMRustArchiveChildName(self.raw, &mut name_len); - if name_ptr.is_null() { - None - } else { - let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize); - str::from_utf8(name).ok().map(|s| s.trim()) - } - } - } -} - -impl<'a> Drop for Child<'a> { - fn drop(&mut self) { - unsafe { - super::LLVMRustArchiveChildFree(&mut *(self.raw as *mut _)); - } - } -} diff --git a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs index c696b8d8ff2..56d756e52cc 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs @@ -4,7 +4,7 @@ use libc::{c_char, c_uint}; use super::MetadataKindId; use super::ffi::{AttributeKind, BasicBlock, Metadata, Module, Type, Value}; -use crate::llvm::Bool; +use crate::llvm::{Bool, Builder}; #[link(name = "llvm-wrapper", kind = "static")] unsafe extern "C" { @@ -31,6 +31,14 @@ unsafe extern "C" { index: c_uint, kind: AttributeKind, ); + pub(crate) fn LLVMRustPositionBefore<'a>(B: &'a Builder<'_>, I: &'a Value); + pub(crate) fn LLVMRustPositionAfter<'a>(B: &'a Builder<'_>, I: &'a Value); + pub(crate) fn LLVMRustGetFunctionCall( + F: &Value, + name: *const c_char, + NameLen: libc::size_t, + ) -> Option<&Value>; + } unsafe extern "C" { diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 91ada856d59..ba590851dbd 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -11,9 +11,8 @@ //! the need for an extra cast from `*const u8` on the Rust side. 
#![allow(non_camel_case_types)] -#![allow(non_upper_case_globals)] -use std::fmt::Debug; +use std::fmt::{self, Debug}; use std::marker::PhantomData; use std::num::NonZero; use std::ptr; @@ -33,10 +32,59 @@ use crate::llvm; /// In the LLVM-C API, boolean values are passed as `typedef int LLVMBool`, /// which has a different ABI from Rust or C++ `bool`. -pub(crate) type Bool = c_int; +/// +/// This wrapper does not implement `PartialEq`. +/// To test the underlying boolean value, use [`Self::is_true`]. +#[derive(Clone, Copy)] +#[repr(transparent)] +pub(crate) struct Bool { + value: c_int, +} + +pub(crate) const TRUE: Bool = Bool::TRUE; +pub(crate) const FALSE: Bool = Bool::FALSE; + +impl Bool { + pub(crate) const TRUE: Self = Self { value: 1 }; + pub(crate) const FALSE: Self = Self { value: 0 }; + + pub(crate) const fn from_bool(rust_bool: bool) -> Self { + if rust_bool { Self::TRUE } else { Self::FALSE } + } + + /// Converts this LLVM-C boolean to a Rust `bool`. + pub(crate) fn is_true(self) -> bool { + // Since we're interacting with a C API, follow the C convention of + // treating any nonzero value as true. + self.value != Self::FALSE.value + } +} + +impl Debug for Bool { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self.value { + 0 => f.write_str("FALSE"), + 1 => f.write_str("TRUE"), + // As with `Self::is_true`, treat any nonzero value as true. + v => write!(f, "TRUE ({v})"), + } + } +} -pub(crate) const True: Bool = 1 as Bool; -pub(crate) const False: Bool = 0 as Bool; +/// Convenience trait to convert `bool` to `llvm::Bool` with an explicit method call. +/// +/// Being able to write `b.to_llvm_bool()` is less noisy than `llvm::Bool::from(b)`, +/// while being more explicit and less mistake-prone than something like `b.into()`. +pub(crate) trait ToLlvmBool: Copy { + fn to_llvm_bool(self) -> llvm::Bool; +} + +impl ToLlvmBool for bool { + #[inline(always)] + fn to_llvm_bool(self) -> llvm::Bool { + llvm::Bool::from_bool(self) + } +} /// Wrapper for a raw enum value returned from LLVM's C APIs. /// @@ -97,6 +145,17 @@ pub(crate) enum ModuleFlagMergeBehavior { // Consts for the LLVM CallConv type, pre-cast to usize. +/// Must match the layout of `LLVMTailCallKind`. +#[derive(Copy, Clone, PartialEq, Debug)] +#[repr(C)] +#[allow(dead_code)] +pub(crate) enum TailCallKind { + None = 0, + Tail = 1, + MustTail = 2, + NoTail = 3, +} + /// LLVM CallingConv::ID. Should we wrap this? /// /// See <https://github.com/llvm/llvm-project/blob/main/llvm/include/llvm/IR/CallingConv.h> @@ -204,7 +263,7 @@ pub(crate) enum AttributeKind { MinSize = 4, Naked = 5, NoAlias = 6, - NoCapture = 7, + CapturesAddress = 7, NoInline = 8, NonNull = 9, NoRedZone = 10, @@ -239,6 +298,8 @@ FnRetThunkExtern = 41, Writable = 42, DeadOnUnwind = 43, + DeadOnReturn = 44, + CapturesReadOnly = 45, } /// LLVMIntPredicate @@ -321,10 +382,15 @@ impl RealPredicate { } } -/// LLVMTypeKind -#[derive(Copy, Clone, PartialEq, Debug)] +/// Must match the layout of `LLVMTypeKind`. +/// +/// Use [`RawEnum<TypeKind>`] for values of `LLVMTypeKind` returned from LLVM, +/// to avoid risk of UB if LLVM adds new enum values. +/// +/// All of LLVM's variants should be declared here, even if no Rust-side code refers +/// to them, because unknown variants will cause [`RawEnum::to_rust`] to panic.
+#[derive(Copy, Clone, PartialEq, Debug, TryFromU32)] #[repr(C)] -#[expect(dead_code, reason = "Some variants are unused, but are kept to match LLVM-C")] pub(crate) enum TypeKind { Void = 0, Half = 1, @@ -599,17 +665,6 @@ pub(crate) enum DiagnosticLevel { Remark, } -/// LLVMRustArchiveKind -#[derive(Copy, Clone)] -#[repr(C)] -pub(crate) enum ArchiveKind { - K_GNU, - K_BSD, - K_DARWIN, - K_COFF, - K_AIXBIG, -} - unsafe extern "C" { // LLVMRustThinLTOData pub(crate) type ThinLTOData; @@ -758,19 +813,12 @@ pub(crate) struct Builder<'a>(InvariantOpaque<'a>); pub(crate) struct PassManager<'a>(InvariantOpaque<'a>); unsafe extern "C" { pub type TargetMachine; - pub(crate) type Archive; } -#[repr(C)] -pub(crate) struct ArchiveIterator<'a>(InvariantOpaque<'a>); -#[repr(C)] -pub(crate) struct ArchiveChild<'a>(InvariantOpaque<'a>); unsafe extern "C" { pub(crate) type Twine; pub(crate) type DiagnosticInfo; pub(crate) type SMDiagnostic; } -#[repr(C)] -pub(crate) struct RustArchiveMember<'a>(InvariantOpaque<'a>); /// Opaque pointee of `LLVMOperandBundleRef`. #[repr(C)] pub(crate) struct OperandBundle<'a>(InvariantOpaque<'a>); @@ -1009,7 +1057,7 @@ unsafe extern "C" { ModuleID: *const c_char, C: &Context, ) -> &Module; - pub(crate) fn LLVMCloneModule(M: &Module) -> &Module; + pub(crate) safe fn LLVMCloneModule(M: &Module) -> &Module; /// Data layout. See Module::getDataLayout. pub(crate) fn LLVMGetDataLayoutStr(M: &Module) -> *const c_char; @@ -1035,6 +1083,8 @@ unsafe extern "C" { CanThrow: llvm::Bool, ) -> &'ll Value; + pub(crate) safe fn LLVMGetTypeKind(Ty: &Type) -> RawEnum<TypeKind>; + // Operations on integer types pub(crate) fn LLVMInt1TypeInContext(C: &Context) -> &Type; pub(crate) fn LLVMInt8TypeInContext(C: &Context) -> &Type; @@ -1138,6 +1188,11 @@ unsafe extern "C" { Count: c_uint, Packed: Bool, ) -> &'a Value; + pub(crate) fn LLVMConstNamedStruct<'a>( + StructTy: &'a Type, + ConstantVals: *const &'a Value, + Count: c_uint, + ) -> &'a Value; pub(crate) fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value; // Constant expressions @@ -1168,19 +1223,20 @@ unsafe extern "C" { pub(crate) fn LLVMGlobalGetValueType(Global: &Value) -> &Type; // Operations on global variables - pub(crate) fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; + pub(crate) safe fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMAddGlobal<'a>(M: &'a Module, Ty: &'a Type, Name: *const c_char) -> &'a Value; pub(crate) fn LLVMGetNamedGlobal(M: &Module, Name: *const c_char) -> Option<&Value>; pub(crate) fn LLVMGetFirstGlobal(M: &Module) -> Option<&Value>; pub(crate) fn LLVMGetNextGlobal(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMDeleteGlobal(GlobalVar: &Value); - pub(crate) fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>; + pub(crate) safe fn LLVMGetInitializer(GlobalVar: &Value) -> Option<&Value>; pub(crate) fn LLVMSetInitializer<'a>(GlobalVar: &'a Value, ConstantVal: &'a Value); - pub(crate) fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool; + pub(crate) safe fn LLVMIsThreadLocal(GlobalVar: &Value) -> Bool; pub(crate) fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode); - pub(crate) fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; - pub(crate) fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); + pub(crate) safe fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; + pub(crate) safe fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); pub(crate) safe fn LLVMSetTailCall(CallInst: &Value, IsTailCall: 
Bool); + pub(crate) safe fn LLVMSetTailCallKind(CallInst: &Value, kind: TailCallKind); // Operations on attributes pub(crate) fn LLVMCreateStringAttribute( @@ -1217,6 +1273,8 @@ unsafe extern "C" { ) -> &'a BasicBlock; // Operations on instructions + pub(crate) fn LLVMGetInstructionParent(Inst: &Value) -> &BasicBlock; + pub(crate) fn LLVMGetCalledValue(CallInst: &Value) -> Option<&Value>; pub(crate) fn LLVMIsAInstruction(Val: &Value) -> Option<&Value>; pub(crate) fn LLVMGetFirstBasicBlock(Fn: &Value) -> &BasicBlock; pub(crate) fn LLVMGetOperand(Val: &Value, Index: c_uint) -> Option<&Value>; @@ -1492,12 +1550,6 @@ unsafe extern "C" { Ty: &'a Type, Name: *const c_char, ) -> &'a Value; - pub(crate) fn LLVMBuildArrayAlloca<'a>( - B: &Builder<'a>, - Ty: &'a Type, - Val: &'a Value, - Name: *const c_char, - ) -> &'a Value; pub(crate) fn LLVMBuildLoad2<'a>( B: &Builder<'a>, Ty: &'a Type, @@ -1724,7 +1776,7 @@ unsafe extern "C" { pub(crate) safe fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; - pub(crate) fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); + pub(crate) safe fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); pub(crate) fn LLVMIsAConstantInt(value_ref: &Value) -> Option<&ConstantInt>; @@ -1828,9 +1880,6 @@ unsafe extern "C" { // Create and destroy contexts. pub(crate) fn LLVMRustContextCreate(shouldDiscardNames: bool) -> &'static mut Context; - /// See llvm::LLVMTypeKind::getTypeID. - pub(crate) fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind; - // Operations on all values pub(crate) fn LLVMRustGlobalAddMetadata<'a>( Val: &'a Value, @@ -1880,11 +1929,17 @@ unsafe extern "C" { C: &Context, effects: MemoryEffects, ) -> &Attribute; + /// ## Safety + /// - Each of `LowerWords` and `UpperWords` must point to an array that is + /// long enough to fully define an integer of size `NumBits`, i.e. each + /// pointer must point to `NumBits.div_ceil(64)` elements or more. + /// - The implementation will make its own copy of the pointed-to `u64` + /// values, so the pointers only need to outlive this function call. pub(crate) fn LLVMRustCreateRangeAttribute( C: &Context, - num_bits: c_uint, - lower_words: *const u64, - upper_words: *const u64, + NumBits: c_uint, + LowerWords: *const u64, + UpperWords: *const u64, ) -> &Attribute; // Operations on functions @@ -1980,12 +2035,12 @@ unsafe extern "C" { pub(crate) fn LLVMRustBuildMinNum<'a>( B: &Builder<'a>, LHS: &'a Value, - LHS: &'a Value, + RHS: &'a Value, ) -> &'a Value; pub(crate) fn LLVMRustBuildMaxNum<'a>( B: &Builder<'a>, LHS: &'a Value, - LHS: &'a Value, + RHS: &'a Value, ) -> &'a Value; // Atomic Operations @@ -2044,10 +2099,6 @@ unsafe extern "C" { NumExpansionRegions: size_t, BranchRegions: *const crate::coverageinfo::ffi::BranchRegion, NumBranchRegions: size_t, - MCDCBranchRegions: *const crate::coverageinfo::ffi::MCDCBranchRegion, - NumMCDCBranchRegions: size_t, - MCDCDecisionRegions: *const crate::coverageinfo::ffi::MCDCDecisionRegion, - NumMCDCDecisionRegions: size_t, BufferOut: &RustString, ); @@ -2429,8 +2480,9 @@ unsafe extern "C" { OutputObjFile: *const c_char, DebugInfoCompression: *const c_char, UseEmulatedTls: bool, - ArgsCstrBuff: *const c_char, + ArgsCstrBuff: *const c_uchar, // See "PTR_LEN_STR". 
ArgsCstrBuffLen: usize, + UseWasmEH: bool, ) -> *mut TargetMachine; pub(crate) fn LLVMRustDisposeTargetMachine(T: *mut TargetMachine); @@ -2495,19 +2547,6 @@ unsafe extern "C" { pub(crate) fn LLVMRustSetNormalizedTarget(M: &Module, triple: *const c_char); pub(crate) fn LLVMRustRunRestrictionPass(M: &Module, syms: *const *const c_char, len: size_t); - pub(crate) fn LLVMRustOpenArchive(path: *const c_char) -> Option<&'static mut Archive>; - pub(crate) fn LLVMRustArchiveIteratorNew(AR: &Archive) -> &mut ArchiveIterator<'_>; - pub(crate) fn LLVMRustArchiveIteratorNext<'a>( - AIR: &ArchiveIterator<'a>, - ) -> Option<&'a mut ArchiveChild<'a>>; - pub(crate) fn LLVMRustArchiveChildName( - ACR: &ArchiveChild<'_>, - size: &mut size_t, - ) -> *const c_char; - pub(crate) fn LLVMRustArchiveChildFree<'a>(ACR: &'a mut ArchiveChild<'a>); - pub(crate) fn LLVMRustArchiveIteratorFree<'a>(AIR: &'a mut ArchiveIterator<'a>); - pub(crate) fn LLVMRustDestroyArchive(AR: &'static mut Archive); - pub(crate) fn LLVMRustWriteTwineToString(T: &Twine, s: &RustString); pub(crate) fn LLVMRustUnpackOptimizationDiagnostic<'a>( @@ -2545,23 +2584,9 @@ unsafe extern "C" { num_ranges: &mut usize, ) -> bool; - pub(crate) fn LLVMRustWriteArchive( - Dst: *const c_char, - NumMembers: size_t, - Members: *const &RustArchiveMember<'_>, - WriteSymbtab: bool, - Kind: ArchiveKind, - isEC: bool, - ) -> LLVMRustResult; - pub(crate) fn LLVMRustArchiveMemberNew<'a>( - Filename: *const c_char, - Name: *const c_char, - Child: Option<&ArchiveChild<'a>>, - ) -> &'a mut RustArchiveMember<'a>; - pub(crate) fn LLVMRustArchiveMemberFree<'a>(Member: &'a mut RustArchiveMember<'a>); - pub(crate) fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine); + pub(crate) fn LLVMRustPositionBuilderPastAllocas<'a>(B: &Builder<'a>, Fn: &'a Value); pub(crate) fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock); pub(crate) fn LLVMRustSetModulePICLevel(M: &Module); @@ -2609,13 +2634,6 @@ unsafe extern "C" { len: usize, Identifier: *const c_char, ) -> Option<&Module>; - pub(crate) fn LLVMRustGetSliceFromObjectDataByName( - data: *const u8, - len: usize, - name: *const u8, - name_len: usize, - out_len: &mut usize, - ) -> *const u8; pub(crate) fn LLVMRustLinkerNew(M: &Module) -> &mut Linker<'_>; pub(crate) fn LLVMRustLinkerAdd( diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 661174a80df..d6974e22c85 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -3,7 +3,6 @@ use std::ffi::{CStr, CString}; use std::num::NonZero; use std::ptr; -use std::str::FromStr; use std::string::FromUtf8Error; use libc::c_uint; @@ -16,7 +15,6 @@ pub(crate) use self::MetadataType::*; pub(crate) use self::ffi::*; use crate::common::AsCCharPtr; -pub(crate) mod archive_ro; pub(crate) mod diagnostic; pub(crate) mod enzyme_ffi; mod ffi; @@ -42,32 +40,6 @@ pub(crate) fn AddFunctionAttributes<'ll>( } } -pub(crate) fn HasAttributeAtIndex<'ll>( - llfn: &'ll Value, - idx: AttributePlace, - kind: AttributeKind, -) -> bool { - unsafe { LLVMRustHasAttributeAtIndex(llfn, idx.as_uint(), kind) } -} - -pub(crate) fn HasStringAttribute<'ll>(llfn: &'ll Value, name: &str) -> bool { - unsafe { LLVMRustHasFnAttribute(llfn, name.as_c_char_ptr(), name.len()) } -} - -pub(crate) fn RemoveStringAttrFromFn<'ll>(llfn: &'ll Value, name: &str) { - unsafe { LLVMRustRemoveFnAttribute(llfn, name.as_c_char_ptr(), name.len()) } -} - -pub(crate) fn 
RemoveRustEnumAttributeAtIndex( - llfn: &Value, - place: AttributePlace, - kind: AttributeKind, -) { - unsafe { - LLVMRustRemoveEnumAttributeAtIndex(llfn, place.as_uint(), kind); - } -} - pub(crate) fn AddCallSiteAttributes<'ll>( callsite: &'ll Value, idx: AttributePlace, @@ -140,16 +112,26 @@ pub(crate) fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> & pub(crate) fn CreateRangeAttr(llcx: &Context, size: Size, range: WrappingRange) -> &Attribute { let lower = range.start; + // LLVM treats the upper bound as exclusive, but allows wrapping. let upper = range.end.wrapping_add(1); - let lower_words = [lower as u64, (lower >> 64) as u64]; - let upper_words = [upper as u64, (upper >> 64) as u64]; + + // Pass each `u128` endpoint value as a `[u64; 2]` array, least-significant part first. + let as_u64_array = |x: u128| [x as u64, (x >> 64) as u64]; + let lower_words: [u64; 2] = as_u64_array(lower); + let upper_words: [u64; 2] = as_u64_array(upper); + + // To ensure that LLVM doesn't try to read beyond the `[u64; 2]` arrays, + // we must explicitly check that `size_bits` does not exceed 128. + let size_bits = size.bits(); + assert!(size_bits <= 128); + // More robust assertions that are redundant with `size_bits <= 128` and + // should be optimized away. + assert!(size_bits.div_ceil(64) <= u64::try_from(lower_words.len()).unwrap()); + assert!(size_bits.div_ceil(64) <= u64::try_from(upper_words.len()).unwrap()); + let size_bits = c_uint::try_from(size_bits).unwrap(); + unsafe { - LLVMRustCreateRangeAttribute( - llcx, - size.bits().try_into().unwrap(), - lower_words.as_ptr(), - upper_words.as_ptr(), - ) + LLVMRustCreateRangeAttribute(llcx, size_bits, lower_words.as_ptr(), upper_words.as_ptr()) } } @@ -178,21 +160,6 @@ pub(crate) enum CodeGenOptSize { CodeGenOptSizeAggressive = 2, } -impl FromStr for ArchiveKind { - type Err = (); - - fn from_str(s: &str) -> Result<Self, Self::Err> { - match s { - "gnu" => Ok(ArchiveKind::K_GNU), - "bsd" => Ok(ArchiveKind::K_BSD), - "darwin" => Ok(ArchiveKind::K_DARWIN), - "coff" => Ok(ArchiveKind::K_COFF), - "aix_big" => Ok(ArchiveKind::K_AIXBIG), - _ => Err(()), - } - } -} - pub(crate) fn SetInstructionCallConv(instr: &Value, cc: CallConv) { unsafe { LLVMSetInstructionCallConv(instr, cc as c_uint); @@ -211,16 +178,14 @@ pub(crate) fn SetFunctionCallConv(fn_: &Value, cc: CallConv) { // function. 
// For more details on COMDAT sections see e.g., https://www.airs.com/blog/archives/52 pub(crate) fn SetUniqueComdat(llmod: &Module, val: &Value) { - let name_buf = get_value_name(val).to_vec(); + let name_buf = get_value_name(val); let name = CString::from_vec_with_nul(name_buf).or_else(|buf| CString::new(buf.into_bytes())).unwrap(); set_comdat(llmod, val, &name); } -pub(crate) fn SetUnnamedAddress(global: &Value, unnamed: UnnamedAddr) { - unsafe { - LLVMSetUnnamedAddress(global, unnamed); - } +pub(crate) fn set_unnamed_address(global: &Value, unnamed: UnnamedAddr) { + LLVMSetUnnamedAddress(global, unnamed); } pub(crate) fn set_thread_local_mode(global: &Value, mode: ThreadLocalMode) { @@ -260,9 +225,7 @@ pub(crate) fn set_initializer(llglobal: &Value, constant_val: &Value) { } pub(crate) fn set_global_constant(llglobal: &Value, is_constant: bool) { - unsafe { - LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False }); - } + LLVMSetGlobalConstant(llglobal, is_constant.to_llvm_bool()); } pub(crate) fn get_linkage(llglobal: &Value) -> Linkage { @@ -276,7 +239,7 @@ pub(crate) fn set_linkage(llglobal: &Value, linkage: Linkage) { } pub(crate) fn is_declaration(llglobal: &Value) -> bool { - unsafe { LLVMIsDeclaration(llglobal) == ffi::True } + unsafe { LLVMIsDeclaration(llglobal) }.is_true() } pub(crate) fn get_visibility(llglobal: &Value) -> Visibility { @@ -319,12 +282,14 @@ pub(crate) fn get_param(llfn: &Value, index: c_uint) -> &Value { } } -/// Safe wrapper for `LLVMGetValueName2` into a byte slice -pub(crate) fn get_value_name(value: &Value) -> &[u8] { +/// Safe wrapper for `LLVMGetValueName2`. +/// Returns an owned copy of the name, because a subsequent `set_value_name` +/// would invalidate the pointer. +pub(crate) fn get_value_name(value: &Value) -> Vec<u8> { unsafe { let mut len = 0; let data = LLVMGetValueName2(value, &mut len); - std::slice::from_raw_parts(data.cast(), len) + std::slice::from_raw_parts(data.cast(), len).to_vec() } } diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 0fb987bdf82..d927ffd78c2 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -26,7 +26,7 @@ static INIT: Once = Once::new(); pub(crate) fn init(sess: &Session) { unsafe { // Before we touch LLVM, make sure that multithreading is enabled.
- if llvm::LLVMIsMultithreaded() != 1 { + if !llvm::LLVMIsMultithreaded().is_true() { bug!("LLVM compiled without support for threads"); } INIT.call_once(|| { @@ -262,14 +262,24 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea // Filter out features that are not supported by the current LLVM version ("aarch64", "fpmr") => None, // only existed in 18 ("arm", "fp16") => Some(LLVMFeature::new("fullfp16")), + // NVPTX targets added in LLVM 20 + ("nvptx64", "sm_100") if get_version().0 < 20 => None, + ("nvptx64", "sm_100a") if get_version().0 < 20 => None, + ("nvptx64", "sm_101") if get_version().0 < 20 => None, + ("nvptx64", "sm_101a") if get_version().0 < 20 => None, + ("nvptx64", "sm_120") if get_version().0 < 20 => None, + ("nvptx64", "sm_120a") if get_version().0 < 20 => None, + ("nvptx64", "ptx86") if get_version().0 < 20 => None, + ("nvptx64", "ptx87") if get_version().0 < 20 => None, // Filter out features that are not supported by the current LLVM version ("loongarch64", "div32" | "lam-bh" | "lamcas" | "ld-seq-sa" | "scq") if get_version().0 < 20 => { None } + ("loongarch32" | "loongarch64", "32s") if get_version().0 < 21 => None, // Filter out features that are not supported by the current LLVM version - ("riscv32" | "riscv64", "zacas") if get_version().0 < 20 => None, + ("riscv32" | "riscv64", "zacas" | "rva23u64" | "supm") if get_version().0 < 20 => None, ( "s390x", "message-security-assist-extension12" @@ -324,15 +334,12 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea /// /// We do not have to worry about RUSTC_SPECIFIC_FEATURES here, those are handled outside codegen. pub(crate) fn target_config(sess: &Session) -> TargetConfig { - // Add base features for the target. - // We do *not* add the -Ctarget-features there, and instead duplicate the logic for that below. - // The reason is that if LLVM considers a feature implied but we do not, we don't want that to - // show up in `cfg`. That way, `cfg` is entirely under our control -- except for the handling of - // the target CPU, that is still expanded to target features (with all their implied features) - // by LLVM. let target_machine = create_informational_target_machine(sess, true); let (unstable_target_features, target_features) = cfg_target_feature(sess, |feature| { + // This closure determines whether the target CPU has the feature according to LLVM. We do + // *not* consider the `-Ctarget-feature`s here, as that will be handled later in + // `cfg_target_feature`. if let Some(feat) = to_llvm_features(sess, feature) { // All the LLVM features this expands to must be enabled. 
for llvm_feature in feat { @@ -371,24 +378,25 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) { let target_abi = sess.target.options.abi.as_ref(); let target_pointer_width = sess.target.pointer_width; let version = get_version(); + let lt_20_1_1 = version < (20, 1, 1); + let lt_21_0_0 = version < (21, 0, 0); cfg.has_reliable_f16 = match (target_arch, target_os) { - // Selection failure <https://github.com/llvm/llvm-project/issues/50374> - ("s390x", _) => false, - // LLVM crash without neon <https://github.com/llvm/llvm-project/issues/129394> (now fixed) + // LLVM crash without neon <https://github.com/llvm/llvm-project/issues/129394> (fixed in llvm20) ("aarch64", _) - if !cfg.target_features.iter().any(|f| f.as_str() == "neon") - && version < (20, 1, 1) => + if !cfg.target_features.iter().any(|f| f.as_str() == "neon") && lt_20_1_1 => { false } // Unsupported <https://github.com/llvm/llvm-project/issues/94434> ("arm64ec", _) => false, + // Selection failure <https://github.com/llvm/llvm-project/issues/50374> (fixed in llvm21) + ("s390x", _) if lt_21_0_0 => false, // MinGW ABI bugs <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115054> ("x86_64", "windows") if target_env == "gnu" && target_abi != "llvm" => false, // Infinite recursion <https://github.com/llvm/llvm-project/issues/97981> ("csky", _) => false, - ("hexagon", _) => false, + ("hexagon", _) if lt_21_0_0 => false, // (fixed in llvm21) ("powerpc" | "powerpc64", _) => false, ("sparc" | "sparc64", _) => false, ("wasm32" | "wasm64", _) => false, @@ -401,18 +409,21 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) { cfg.has_reliable_f128 = match (target_arch, target_os) { // Unsupported <https://github.com/llvm/llvm-project/issues/94434> ("arm64ec", _) => false, - // Selection bug <https://github.com/llvm/llvm-project/issues/96432> - ("mips64" | "mips64r6", _) => false, - // Selection bug <https://github.com/llvm/llvm-project/issues/95471> + // Selection bug <https://github.com/llvm/llvm-project/issues/96432> (fixed in llvm20) + ("mips64" | "mips64r6", _) if lt_20_1_1 => false, + // Selection bug <https://github.com/llvm/llvm-project/issues/95471>. This issue is closed + // but basic math still does not work. ("nvptx64", _) => false, + // Unsupported https://github.com/llvm/llvm-project/issues/121122 + ("amdgpu", _) => false, // ABI bugs <https://github.com/rust-lang/rust/issues/125109> et al. (full // list at <https://github.com/rust-lang/rust/issues/116909>) ("powerpc" | "powerpc64", _) => false, // ABI unsupported <https://github.com/llvm/llvm-project/issues/41838> ("sparc", _) => false, // Stack alignment bug <https://github.com/llvm/llvm-project/issues/77401>. NB: tests may - // not fail if our compiler-builtins is linked. - ("x86", _) => false, + // not fail if our compiler-builtins is linked. (fixed in llvm21) + ("x86", _) if lt_21_0_0 => false, // MinGW ABI bugs <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=115054> ("x86_64", "windows") if target_env == "gnu" && target_abi != "llvm" => false, // There are no known problems on other platforms, so the only requirement is that symbols @@ -433,6 +444,9 @@ fn update_target_reliable_float_cfg(sess: &Session, cfg: &mut TargetConfig) { // This rules out anything that doesn't have `long double` = `binary128`; <= 32 bits // (ld is `f64`), anything other than Linux (Windows and MacOS use `f64`), and `x86` // (ld is 80-bit extended precision). + // + // musl does not implement the symbols required for f128 math at all. 
+ _ if target_env == "musl" => false, ("x86_64", _) => false, (_, "linux") if target_pointer_width == 64 => true, _ => false, diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs index 3f38e1e191b..52eefe2d4d2 100644 --- a/compiler/rustc_codegen_llvm/src/mono_item.rs +++ b/compiler/rustc_codegen_llvm/src/mono_item.rs @@ -1,8 +1,9 @@ use rustc_codegen_ssa::traits::*; +use rustc_hir::attrs::Linkage; use rustc_hir::def::DefKind; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::bug; -use rustc_middle::mir::mono::{Linkage, Visibility}; +use rustc_middle::mir::mono::Visibility; use rustc_middle::ty::layout::{FnAbiOf, HasTypingEnv, LayoutOf}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use rustc_session::config::CrateType; @@ -55,8 +56,8 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty()); let lldecl = self.declare_fn(symbol_name, fn_abi, Some(instance)); llvm::set_linkage(lldecl, base::linkage_to_llvm(linkage)); - let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); - base::set_link_section(lldecl, attrs); + let attrs = self.tcx.codegen_instance_attrs(instance.def); + base::set_link_section(lldecl, &attrs); if (linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR) && self.tcx.sess.target.supports_comdat() { @@ -131,8 +132,8 @@ impl CodegenCx<'_, '_> { } // Thread-local variables generally don't support copy relocations. - let is_thread_local_var = unsafe { llvm::LLVMIsAGlobalVariable(llval) } - .is_some_and(|v| unsafe { llvm::LLVMIsThreadLocal(v) } == llvm::True); + let is_thread_local_var = llvm::LLVMIsAGlobalVariable(llval) + .is_some_and(|v| llvm::LLVMIsThreadLocal(v).is_true()); if is_thread_local_var { return false; } diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index ee472e75ed4..9ecaf5f24fe 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -2,7 +2,7 @@ use std::borrow::Borrow; use std::hash::{Hash, Hasher}; use std::{fmt, ptr}; -use libc::{c_char, c_uint}; +use libc::c_uint; use rustc_abi::{AddressSpace, Align, Integer, Reg, Size}; use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::traits::*; @@ -15,7 +15,7 @@ use rustc_target::callconv::{CastTarget, FnAbi}; use crate::abi::{FnAbiLlvmExt, LlvmType}; use crate::context::{CodegenCx, GenericCx, SCx}; pub(crate) use crate::llvm::Type; -use crate::llvm::{Bool, False, Metadata, True}; +use crate::llvm::{FALSE, Metadata, TRUE, ToLlvmBool}; use crate::type_of::LayoutLlvmExt; use crate::value::Value; use crate::{common, llvm}; @@ -53,7 +53,9 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { } pub(crate) fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) { - unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) } + unsafe { + llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed.to_llvm_bool()) + } } pub(crate) fn type_void(&self) -> &'ll Type { unsafe { llvm::LLVMVoidTypeInContext(self.llcx()) } @@ -139,7 +141,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { } pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { - unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } + unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, TRUE) } } pub(crate) fn type_i1(&self) -> &'ll Type { @@ -152,7 
+154,7 @@ impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { self.llcx(), els.as_ptr(), els.len() as c_uint, - packed as Bool, + packed.to_llvm_bool(), ) } } @@ -200,11 +202,11 @@ impl<'ll, CX: Borrow<SCx<'ll>>> BaseTypeCodegenMethods for GenericCx<'ll, CX> { } fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { - unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) } + unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, FALSE) } } fn type_kind(&self, ty: &'ll Type) -> TypeKind { - unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() } + llvm::LLVMGetTypeKind(ty).to_rust().to_generic() } fn type_ptr(&self) -> &'ll Type { @@ -298,8 +300,8 @@ impl<'ll, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { - fn add_type_metadata(&self, function: &'ll Value, typeid: String) { - let typeid_metadata = self.typeid_metadata(typeid).unwrap(); + fn add_type_metadata(&self, function: &'ll Value, typeid: &[u8]) { + let typeid_metadata = self.create_metadata(typeid); unsafe { let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata]; llvm::LLVMRustGlobalAddMetadata( @@ -310,8 +312,8 @@ impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn set_type_metadata(&self, function: &'ll Value, typeid: String) { - let typeid_metadata = self.typeid_metadata(typeid).unwrap(); + fn set_type_metadata(&self, function: &'ll Value, typeid: &[u8]) { + let typeid_metadata = self.create_metadata(typeid); unsafe { let v = [llvm::LLVMValueAsMetadata(self.const_usize(0)), typeid_metadata]; llvm::LLVMGlobalSetMetadata( @@ -322,10 +324,8 @@ impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn typeid_metadata(&self, typeid: String) -> Option<&'ll Metadata> { - Some(unsafe { - llvm::LLVMMDStringInContext2(self.llcx, typeid.as_ptr() as *const c_char, typeid.len()) - }) + fn typeid_metadata(&self, typeid: &[u8]) -> Option<&'ll Metadata> { + Some(self.create_metadata(typeid)) } fn add_kcfi_type_metadata(&self, function: &'ll Value, kcfi_typeid: u32) {