Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
24 files changed, 498 insertions, 776 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index d221bad28ef..e02b457fd0b 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -216,9 +216,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx)); - let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty); - bx.store(val, cast_dst, self.layout.align.abi); + bx.store(val, dst.llval, self.layout.align.abi); } else { // The actual return type is a struct, but the ABI // adaptation code has cast it into some scalar type. The @@ -336,7 +334,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx), PassMode::Cast(cast, _) => cast.llvm_type(cx), PassMode::Indirect { .. } => { - llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + llargument_tys.push(cx.type_ptr()); cx.type_void() } }; @@ -364,9 +362,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } cast.llvm_type(cx) } - PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => { - cx.type_ptr_to(arg.memory_ty(cx)) - } + PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(), }; llargument_tys.push(llarg_ty); } @@ -379,12 +375,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { - unsafe { - llvm::LLVMPointerType( - self.llvm_type(cx), - cx.data_layout().instruction_address_space.0 as c_uint, - ) - } + cx.type_ptr_ext(cx.data_layout().instruction_address_space) } fn llvm_cconv(&self) -> llvm::CallConv { diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index a57508815d6..8bb93025c45 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -28,14 +28,14 @@ pub(crate) unsafe fn codegen( tws => bug!("Unsupported target word size for int: {}", tws), }; let i8 = llvm::LLVMInt8TypeInContext(llcx); - let i8p = llvm::LLVMPointerType(i8, 0); + let i8p = llvm::LLVMPointerTypeInContext(llcx, 0); let void = llvm::LLVMVoidTypeInContext(llcx); if kind == AllocatorKind::Default { for method in ALLOCATOR_METHODS { let mut args = Vec::with_capacity(method.inputs.len()); - for ty in method.inputs.iter() { - match *ty { + for input in method.inputs.iter() { + match input.ty { AllocatorTy::Layout => { args.push(usize); // size args.push(usize); // align diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index bf0ed8a4de0..b2d28cef899 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -7,7 +7,7 @@ use crate::{LlvmCodegenBackend, ModuleLlvm}; use object::read::archive::ArchiveFile; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared}; use rustc_codegen_ssa::back::symbol_export; -use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, TargetMachineFactoryConfig}; +use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, TargetMachineFactoryConfig}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{looks_like_rust_object_file, ModuleCodegen, ModuleKind}; use 
rustc_data_structures::fx::FxHashMap; @@ -166,7 +166,7 @@ fn get_bitcode_slice_from_object_data(obj: &[u8]) -> Result<&[u8], LtoBitcodeFro /// for further optimization. pub(crate) fn run_fat( cgcx: &CodegenContext<LlvmCodegenBackend>, - modules: Vec<FatLTOInput<LlvmCodegenBackend>>, + modules: Vec<FatLtoInput<LlvmCodegenBackend>>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, ) -> Result<LtoModuleCodegen<LlvmCodegenBackend>, FatalError> { let diag_handler = cgcx.create_diag_handler(); @@ -220,7 +220,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen<ModuleLlvm>) -> (String, ThinBu fn fat_lto( cgcx: &CodegenContext<LlvmCodegenBackend>, diag_handler: &Handler, - modules: Vec<FatLTOInput<LlvmCodegenBackend>>, + modules: Vec<FatLtoInput<LlvmCodegenBackend>>, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, symbols_below_threshold: &[*const libc::c_char], @@ -245,8 +245,8 @@ fn fat_lto( })); for module in modules { match module { - FatLTOInput::InMemory(m) => in_memory.push(m), - FatLTOInput::Serialized { name, buffer } => { + FatLtoInput::InMemory(m) => in_memory.push(m), + FatLtoInput::Serialized { name, buffer } => { info!("pushing serialized module {:?}", name); let buffer = SerializedModule::Local(buffer); serialized_modules.push((buffer, CString::new(name).unwrap())); diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index cbcbdea84da..47cc5bd52e2 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -4,7 +4,6 @@ use crate::back::profiling::{ }; use crate::base; use crate::common; -use crate::consts; use crate::errors::{ CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode, }; @@ -321,6 +320,7 @@ impl<'a> DiagnosticHandlers<'a> { }) .and_then(|dir| dir.to_str().and_then(|p| CString::new(p).ok())); + let pgo_available = cgcx.opts.cg.profile_use.is_some(); let data = Box::into_raw(Box::new((cgcx, handler))); unsafe { let old_handler = llvm::LLVMRustContextGetDiagnosticHandler(llcx); @@ -334,6 +334,7 @@ impl<'a> DiagnosticHandlers<'a> { // The `as_ref()` is important here, otherwise the `CString` will be dropped // too soon! 
remark_file.as_ref().map(|dir| dir.as_ptr()).unwrap_or(std::ptr::null()), + pgo_available, ); DiagnosticHandlers { data, llcx, old_handler } } @@ -382,29 +383,22 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void } llvm::diagnostic::Optimization(opt) => { - let enabled = match cgcx.remark { - Passes::All => true, - Passes::Some(ref v) => v.iter().any(|s| *s == opt.pass_name), - }; - - if enabled { - diag_handler.emit_note(FromLlvmOptimizationDiag { - filename: &opt.filename, - line: opt.line, - column: opt.column, - pass_name: &opt.pass_name, - kind: match opt.kind { - OptimizationDiagnosticKind::OptimizationRemark => "success", - OptimizationDiagnosticKind::OptimizationMissed - | OptimizationDiagnosticKind::OptimizationFailure => "missed", - OptimizationDiagnosticKind::OptimizationAnalysis - | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute - | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis", - OptimizationDiagnosticKind::OptimizationRemarkOther => "other", - }, - message: &opt.message, - }); - } + diag_handler.emit_note(FromLlvmOptimizationDiag { + filename: &opt.filename, + line: opt.line, + column: opt.column, + pass_name: &opt.pass_name, + kind: match opt.kind { + OptimizationDiagnosticKind::OptimizationRemark => "success", + OptimizationDiagnosticKind::OptimizationMissed + | OptimizationDiagnosticKind::OptimizationFailure => "missed", + OptimizationDiagnosticKind::OptimizationAnalysis + | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute + | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis", + OptimizationDiagnosticKind::OptimizationRemarkOther => "other", + }, + message: &opt.message, + }); } llvm::diagnostic::PGO(diagnostic_ref) | llvm::diagnostic::Linker(diagnostic_ref) => { let message = llvm::build_string(|s| { @@ -478,6 +472,8 @@ pub(crate) unsafe fn llvm_optimize( Some(llvm::SanitizerOptions { sanitize_address: config.sanitizer.contains(SanitizerSet::ADDRESS), sanitize_address_recover: config.sanitizer_recover.contains(SanitizerSet::ADDRESS), + sanitize_cfi: config.sanitizer.contains(SanitizerSet::CFI), + sanitize_kcfi: config.sanitizer.contains(SanitizerSet::KCFI), sanitize_memory: config.sanitizer.contains(SanitizerSet::MEMORY), sanitize_memory_recover: config.sanitizer_recover.contains(SanitizerSet::MEMORY), sanitize_memory_track_origins: config.sanitizer_memory_track_origins as c_int, @@ -513,6 +509,7 @@ pub(crate) unsafe fn llvm_optimize( &*module.module_llvm.tm, to_pass_builder_opt_level(opt_level), opt_stage, + cgcx.opts.cg.linker_plugin_lto.enabled(), config.no_prepopulate_passes, config.verify_llvm_ir, using_thin_buffers, @@ -992,7 +989,7 @@ fn create_msvc_imps( let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" }; unsafe { - let i8p_ty = Type::i8p_llcx(llcx); + let ptr_ty = Type::ptr_llcx(llcx); let globals = base::iter_globals(llmod) .filter(|&val| { llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage @@ -1012,8 +1009,8 @@ fn create_msvc_imps( .collect::<Vec<_>>(); for (imp_name, val) in globals { - let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast()); - llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty)); + let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast()); + llvm::LLVMSetInitializer(imp, val); llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } } diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index 5b5f81c0329..b659fd02eec 
100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen // happen after the llvm.used variables are created. for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMReplaceAllUsesWith(old_g, new_g); llvm::LLVMDeleteGlobal(old_g); } } diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 8c2b1831f0a..ac6d8f84142 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -652,7 +652,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) -> &'ll Value { debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags); - let ptr = self.check_store(val, ptr); + assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer); unsafe { let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr); let align = @@ -682,7 +682,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { size: Size, ) { debug!("Store {:?} -> {:?}", val, ptr); - let ptr = self.check_store(val, ptr); + assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer); unsafe { let store = llvm::LLVMRustBuildAtomicStore( self.llbuilder, @@ -873,8 +873,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported"); let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemCpy( self.llbuilder, @@ -900,8 +898,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported"); let size = self.intcast(size, self.type_isize(), false); let is_volatile = flags.contains(MemFlags::VOLATILE); - let dst = self.pointercast(dst, self.type_i8p()); - let src = self.pointercast(src, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemMove( self.llbuilder, @@ -924,7 +920,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { flags: MemFlags, ) { let is_volatile = flags.contains(MemFlags::VOLATILE); - let ptr = self.pointercast(ptr, self.type_i8p()); unsafe { llvm::LLVMRustBuildMemSet( self.llbuilder, @@ -981,7 +976,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) { - let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false); let landing_pad = self.landing_pad(ty, pers_fn, 0); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); @@ -990,14 +985,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) { - let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false); let landing_pad = self.landing_pad(ty, pers_fn, 1); - self.add_clause(landing_pad, self.const_array(self.type_i8p(), &[])); + self.add_clause(landing_pad, 
self.const_array(self.type_ptr(), &[])); (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1)) } fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) { - let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false); let mut exn = self.const_poison(ty); exn = self.insert_value(exn, exn0, 0); exn = self.insert_value(exn, exn1, 1); @@ -1161,7 +1156,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) }; let llty = self.cx.type_func( - &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()], + &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()], self.cx.type_void(), ); let args = &[fn_name, hash, num_counters, index]; @@ -1387,25 +1382,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for catchret") } - fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { - let dest_ptr_ty = self.cx.val_ty(ptr); - let stored_ty = self.cx.val_ty(val); - let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - - assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); - - if dest_ptr_ty == stored_ptr_ty { - ptr - } else { - debug!( - "type mismatch in store. \ - Expected {:?}, got {:?}; inserting bitcast", - dest_ptr_ty, stored_ptr_ty - ); - self.bitcast(ptr, stored_ptr_ty) - } - } - fn check_call<'b>( &mut self, typ: &str, @@ -1466,7 +1442,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { return; } - let ptr = self.pointercast(ptr, self.cx.type_i8p()); self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]); } @@ -1537,9 +1512,9 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, llfn: &'ll Value, ) { - let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() }; - if is_indirect_call && fn_abi.is_some() && self.tcx.sess.is_sanitizer_cfi_enabled() { - if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::CFI) { + let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) }; + if self.tcx.sess.is_sanitizer_cfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call { + if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::CFI) { return; } @@ -1551,7 +1526,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { options.insert(TypeIdOptions::NORMALIZE_INTEGERS); } - let typeid = typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options); + let typeid = typeid_for_fnabi(self.tcx, fn_abi, options); let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap(); // Test whether the function pointer is associated with the type identifier. 
@@ -1575,25 +1550,26 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, llfn: &'ll Value, ) -> Option<llvm::OperandBundleDef<'ll>> { - let is_indirect_call = unsafe { llvm::LLVMIsAFunction(llfn).is_none() }; - let kcfi_bundle = if is_indirect_call && self.tcx.sess.is_sanitizer_kcfi_enabled() { - if fn_attrs.is_some() && fn_attrs.unwrap().no_sanitize.contains(SanitizerSet::KCFI) { - return None; - } + let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) }; + let kcfi_bundle = + if self.tcx.sess.is_sanitizer_kcfi_enabled() && let Some(fn_abi) = fn_abi && is_indirect_call { + if let Some(fn_attrs) = fn_attrs && fn_attrs.no_sanitize.contains(SanitizerSet::KCFI) { + return None; + } - let mut options = TypeIdOptions::empty(); - if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() { - options.insert(TypeIdOptions::GENERALIZE_POINTERS); - } - if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() { - options.insert(TypeIdOptions::NORMALIZE_INTEGERS); - } + let mut options = TypeIdOptions::empty(); + if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() { + options.insert(TypeIdOptions::GENERALIZE_POINTERS); + } + if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() { + options.insert(TypeIdOptions::NORMALIZE_INTEGERS); + } - let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi.unwrap(), options); - Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)])) - } else { - None - }; + let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options); + Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)])) + } else { + None + }; kcfi_bundle } } diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index 5d3e245f45f..36c098218cf 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -4,13 +4,11 @@ //! and methods are represented as just a fn ptr and not a full //! closure. -use crate::abi::FnAbiLlvmExt; use crate::attributes; use crate::common; use crate::context::CodegenCx; use crate::llvm; use crate::value::Value; -use rustc_codegen_ssa::traits::*; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; @@ -45,39 +43,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty()); let llfn = if let Some(llfn) = cx.get_declared_value(sym) { - // Create a fn pointer with the new signature. - let llptrty = fn_abi.ptr_to_llvm_type(cx); - - // This is subtle and surprising, but sometimes we have to bitcast - // the resulting fn pointer. The reason has to do with external - // functions. If you have two crates that both bind the same C - // library, they may not use precisely the same types: for - // example, they will probably each declare their own structs, - // which are distinct types from LLVM's point of view (nominal - // types). - // - // Now, if those two crates are linked into an application, and - // they contain inlined code, you can wind up with a situation - // where both of those functions wind up being loaded into this - // application simultaneously. In that case, the same function - // (from LLVM's point of view) requires two types. But of course - // LLVM won't allow one function to have two types. 
- // - // What we currently do, therefore, is declare the function with - // one of the two types (whichever happens to come first) and then - // bitcast as needed when the function is referenced to make sure - // it has the type we expect. - // - // This can occur on either a crate-local or crate-external - // reference. It also occurs when testing libcore and in some - // other weird situations. Annoying. - if cx.val_ty(llfn) != llptrty { - debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); - cx.const_ptrcast(llfn, llptrty) - } else { - debug!("get_fn: not casting pointer!"); - llfn - } + llfn } else { let instance_def_id = instance.def_id(); let llfn = if tcx.sess.target.arch == "x86" && diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 130ff839e74..0b0816c27b6 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -1,10 +1,9 @@ //! Code that is useful in various codegen modules. -use crate::consts::{self, const_alloc_to_llvm}; +use crate::consts::const_alloc_to_llvm; pub use crate::context::CodegenCx; use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True}; use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_ast::Mutability; @@ -13,7 +12,6 @@ use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher}; use rustc_hir::def_id::DefId; use rustc_middle::bug; use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar}; -use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::TyCtxt; use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType}; use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer}; @@ -211,11 +209,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { }) .1; let len = s.len(); - let cs = consts::ptrcast( - str_global, - self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)), - ); - (cs, self.const_usize(len as u64)) + (str_global, self.const_usize(len as u64)) } fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value { @@ -292,7 +286,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { let llval = unsafe { llvm::LLVMConstInBoundsGEP2( self.type_i8(), - self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)), + self.const_bitcast(base_addr, self.type_ptr_ext(base_addr_space)), &self.const_usize(offset.bytes()), 1, ) @@ -310,10 +304,6 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { const_alloc_to_llvm(self, alloc) } - fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { - consts::ptrcast(val, ty) - } - fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { self.const_bitcast(val, ty) } @@ -322,7 +312,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMConstInBoundsGEP2( self.type_i8(), - self.const_bitcast(base_addr, self.type_i8p()), + base_addr, &self.const_usize(offset.bytes()), 1, ) diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index eb5969e5644..95af2f8ef4a 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -103,7 +103,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size), }, - cx.type_i8p_ext(address_space), + cx.type_ptr_ext(address_space), )); 
next_offset = offset + pointer_size; } @@ -179,7 +179,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( }) }); llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage); - llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty)); + llvm::LLVMSetInitializer(g2, g1); g2 } } else if cx.tcx.sess.target.arch == "x86" && @@ -193,10 +193,6 @@ fn check_and_apply_linkage<'ll, 'tcx>( } } -pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { llvm::LLVMConstPointerCast(val, ty) } -} - impl<'ll> CodegenCx<'ll, '_> { pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstBitCast(val, ty) } @@ -250,7 +246,7 @@ impl<'ll> CodegenCx<'ll, '_> { let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) { let llty = self.layout_of(ty).llvm_type(self); if let Some(g) = self.get_declared_value(sym) { - if self.val_ty(g) != self.type_ptr_to(llty) { + if self.val_ty(g) != self.type_ptr() { span_bug!(self.tcx.def_span(def_id), "Conflicting types for static"); } } @@ -551,16 +547,14 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { } } - /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*. + /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr. fn add_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; - self.used_statics.borrow_mut().push(cast); + self.used_statics.borrow_mut().push(global); } /// Add a global value to a list to be stored in the `llvm.compiler.used` variable, - /// an array of i8*. + /// an array of ptr. fn add_compiler_used_global(&self, global: &'ll Value) { - let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) }; - self.compiler_used_statics.borrow_mut().push(cast); + self.compiler_used_statics.borrow_mut().push(global); } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 5bf641055c5..b4f7e20e05d 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -59,17 +59,6 @@ pub struct CodegenCx<'ll, 'tcx> { /// Cache of constant strings, pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>, - /// Reverse-direction for const ptrs cast from globals. - /// - /// Key is a Value holding a `*T`, - /// Val is a Value holding a `*[T]`. - /// - /// Needed because LLVM loses pointer->pointee association - /// when we ptrcast, and we have to ptrcast during codegen - /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not - /// a pointer to an LLVM array type. Similar for trait objects. - pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>, - /// Cache of emitted const globals (value -> global) pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>, @@ -156,6 +145,17 @@ pub unsafe fn create_module<'ll>( target_data_layout = target_data_layout.replace("-n32:64-", "-n64-"); } } + if llvm_version < (17, 0, 0) { + if sess.target.arch.starts_with("powerpc") { + // LLVM 17 specifies function pointer alignment for ppc: + // https://reviews.llvm.org/D147016 + target_data_layout = target_data_layout + .replace("-Fn32", "") + .replace("-Fi32", "") + .replace("-Fn64", "") + .replace("-Fi64", ""); + } + } // Ensure the data-layout values hardcoded remain the defaults. 
if sess.target.is_builtin { @@ -464,7 +464,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { instances: Default::default(), vtables: Default::default(), const_str_cache: Default::default(), - const_unsized: Default::default(), const_globals: Default::default(), statics_to_rauw: RefCell::new(Vec::new()), used_statics: RefCell::new(Vec::new()), @@ -495,7 +494,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) { let section = cstr!("llvm.metadata"); - let array = self.const_array(self.type_ptr_to(self.type_i8()), values); + let array = self.const_array(self.type_ptr(), values); unsafe { let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr()); @@ -673,7 +672,7 @@ impl<'ll> CodegenCx<'ll, '_> { ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false)) } - let i8p = self.type_i8p(); + let ptr = self.type_ptr(); let void = self.type_void(); let i1 = self.type_i1(); let t_i8 = self.type_i8(); @@ -687,7 +686,7 @@ impl<'ll> CodegenCx<'ll, '_> { let t_metadata = self.type_metadata(); let t_token = self.type_token(); - ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p); + ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr); ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32); ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32); @@ -723,7 +722,7 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.trap", fn() -> void); ifn!("llvm.debugtrap", fn() -> void); - ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + ifn!("llvm.frameaddress", fn(t_i32) -> ptr); ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); @@ -890,43 +889,44 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64); ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128); - ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void); - ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void); + ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void); + ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void); ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32); ifn!("llvm.localescape", fn(...) 
-> void); - ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); - ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr); + ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr); ifn!("llvm.assume", fn(i1) -> void); - ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void); // This isn't an "LLVM intrinsic", but LLVM's optimization passes - // recognize it like one and we assume it exists in `core::slice::cmp` + // recognize it like one (including turning it into `bcmp` sometimes) + // and we use it to implement intrinsics like `raw_eq` and `compare_bytes` match self.sess().target.arch.as_ref() { - "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16), - _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32), + "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16), + _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32), } // variadic intrinsics - ifn!("llvm.va_start", fn(i8p) -> void); - ifn!("llvm.va_end", fn(i8p) -> void); - ifn!("llvm.va_copy", fn(i8p, i8p) -> void); + ifn!("llvm.va_start", fn(ptr) -> void); + ifn!("llvm.va_end", fn(ptr) -> void); + ifn!("llvm.va_copy", fn(ptr, ptr) -> void); if self.sess().instrument_coverage() { - ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void); + ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void); } - ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1); - ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1}); + ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1); + ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1}); if self.sess().opts.debuginfo != DebugInfo::None { ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void); ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void); } - ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p); + ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr); None } @@ -940,12 +940,10 @@ impl<'ll> CodegenCx<'ll, '_> { let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() { Some(def_id) => self.get_static(def_id), _ => { - let ty = self - .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false); + let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false); self.declare_global("rust_eh_catch_typeinfo", ty) } }; - let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p()); self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo)); eh_catch_typeinfo } @@ -999,7 +997,7 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> { #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { - if let LayoutError::SizeOverflow(_) = err { + if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err { self.sess().emit_fatal(Spanned { span, node: err.into_diagnostic() }) } else { span_bug!(span, "failed to get layout for `{ty}`: {err:?}") diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs index 1791ce4b315..7a82d05ce9e 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs @@ -1,4 +1,4 @@ -use rustc_middle::mir::coverage::{CounterValueReference, MappedExpressionIndex}; +use rustc_middle::mir::coverage::{CounterId, MappedExpressionIndex}; /// Must match the layout of `LLVMRustCounterKind`. 
#[derive(Copy, Clone, Debug)] @@ -36,11 +36,9 @@ impl Counter { Self { kind: CounterKind::Zero, id: 0 } } - /// Constructs a new `Counter` of kind `CounterValueReference`, and converts - /// the given 1-based counter_id to the required 0-based equivalent for - /// the `Counter` encoding. - pub fn counter_value_reference(counter_id: CounterValueReference) -> Self { - Self { kind: CounterKind::CounterValueReference, id: counter_id.zero_based_index() } + /// Constructs a new `Counter` of kind `CounterValueReference`. + pub fn counter_value_reference(counter_id: CounterId) -> Self { + Self { kind: CounterKind::CounterValueReference, id: counter_id.as_u32() } } /// Constructs a new `Counter` of kind `Expression`. @@ -87,3 +85,197 @@ impl CounterExpression { Self { kind, lhs, rhs } } } + +/// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`. +/// +/// Must match the layout of `LLVMRustCounterMappingRegionKind`. +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub enum RegionKind { + /// A CodeRegion associates some code with a counter + CodeRegion = 0, + + /// An ExpansionRegion represents a file expansion region that associates + /// a source range with the expansion of a virtual source file, such as + /// for a macro instantiation or #include file. + ExpansionRegion = 1, + + /// A SkippedRegion represents a source range with code that was skipped + /// by a preprocessor or similar means. + SkippedRegion = 2, + + /// A GapRegion is like a CodeRegion, but its count is only set as the + /// line execution count when its the only region in the line. + GapRegion = 3, + + /// A BranchRegion represents leaf-level boolean expressions and is + /// associated with two counters, each representing the number of times the + /// expression evaluates to true or false. + BranchRegion = 4, +} + +/// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the +/// coverage map, in accordance with the +/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format). +/// The struct composes fields representing the `Counter` type and value(s) (injected counter +/// ID, or expression type and operands), the source file (an indirect index into a "filenames +/// array", encoded separately), and source location (start and end positions of the represented +/// code region). +/// +/// Corresponds to struct `llvm::coverage::CounterMappingRegion`. +/// +/// Must match the layout of `LLVMRustCounterMappingRegion`. +#[derive(Copy, Clone, Debug)] +#[repr(C)] +pub struct CounterMappingRegion { + /// The counter type and type-dependent counter data, if any. + counter: Counter, + + /// If the `RegionKind` is a `BranchRegion`, this represents the counter + /// for the false branch of the region. + false_counter: Counter, + + /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the + /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes + /// that, in turn, are used to look up the filename for this region. + file_id: u32, + + /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find + /// the mapping regions created as a result of macro expansion, by checking if their file id + /// matches the expanded file id. + expanded_file_id: u32, + + /// 1-based starting line of the mapping region. + start_line: u32, + + /// 1-based starting column of the mapping region. 
+ start_col: u32, + + /// 1-based ending line of the mapping region. + end_line: u32, + + /// 1-based ending column of the mapping region. If the high bit is set, the current + /// mapping region is a gap area. + end_col: u32, + + kind: RegionKind, +} + +impl CounterMappingRegion { + pub(crate) fn code_region( + counter: Counter, + file_id: u32, + start_line: u32, + start_col: u32, + end_line: u32, + end_col: u32, + ) -> Self { + Self { + counter, + false_counter: Counter::zero(), + file_id, + expanded_file_id: 0, + start_line, + start_col, + end_line, + end_col, + kind: RegionKind::CodeRegion, + } + } + + // This function might be used in the future; the LLVM API is still evolving, as is coverage + // support. + #[allow(dead_code)] + pub(crate) fn branch_region( + counter: Counter, + false_counter: Counter, + file_id: u32, + start_line: u32, + start_col: u32, + end_line: u32, + end_col: u32, + ) -> Self { + Self { + counter, + false_counter, + file_id, + expanded_file_id: 0, + start_line, + start_col, + end_line, + end_col, + kind: RegionKind::BranchRegion, + } + } + + // This function might be used in the future; the LLVM API is still evolving, as is coverage + // support. + #[allow(dead_code)] + pub(crate) fn expansion_region( + file_id: u32, + expanded_file_id: u32, + start_line: u32, + start_col: u32, + end_line: u32, + end_col: u32, + ) -> Self { + Self { + counter: Counter::zero(), + false_counter: Counter::zero(), + file_id, + expanded_file_id, + start_line, + start_col, + end_line, + end_col, + kind: RegionKind::ExpansionRegion, + } + } + + // This function might be used in the future; the LLVM API is still evolving, as is coverage + // support. + #[allow(dead_code)] + pub(crate) fn skipped_region( + file_id: u32, + start_line: u32, + start_col: u32, + end_line: u32, + end_col: u32, + ) -> Self { + Self { + counter: Counter::zero(), + false_counter: Counter::zero(), + file_id, + expanded_file_id: 0, + start_line, + start_col, + end_line, + end_col, + kind: RegionKind::SkippedRegion, + } + } + + // This function might be used in the future; the LLVM API is still evolving, as is coverage + // support. 
+ #[allow(dead_code)] + pub(crate) fn gap_region( + counter: Counter, + file_id: u32, + start_line: u32, + start_col: u32, + end_line: u32, + end_col: u32, + ) -> Self { + Self { + counter, + false_counter: Counter::zero(), + file_id, + expanded_file_id: 0, + start_line, + start_col, + end_line, + end_col: (1_u32 << 31) | end_col, + kind: RegionKind::GapRegion, + } + } +} diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs index 06844afd6b8..f1e68af25d4 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs @@ -1,26 +1,24 @@ -pub use super::ffi::*; +use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind}; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::bug; use rustc_middle::mir::coverage::{ - CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionId, - InjectedExpressionIndex, MappedExpressionIndex, Op, + CodeRegion, CounterId, ExpressionId, MappedExpressionIndex, Op, Operand, }; use rustc_middle::ty::Instance; use rustc_middle::ty::TyCtxt; #[derive(Clone, Debug, PartialEq)] pub struct Expression { - lhs: ExpressionOperandId, + lhs: Operand, op: Op, - rhs: ExpressionOperandId, + rhs: Operand, region: Option<CodeRegion>, } /// Collects all of the coverage regions associated with (a) injected counters, (b) counter /// expressions (additions or subtraction), and (c) unreachable regions (always counted as zero), -/// for a given Function. Counters and counter expressions have non-overlapping `id`s because they -/// can both be operands in an expression. This struct also stores the `function_source_hash`, +/// for a given Function. This struct also stores the `function_source_hash`, /// computed during instrumentation, and forwarded with counters. /// /// Note, it may be important to understand LLVM's definitions of `unreachable` regions versus "gap @@ -34,8 +32,8 @@ pub struct FunctionCoverage<'tcx> { instance: Instance<'tcx>, source_hash: u64, is_used: bool, - counters: IndexVec<CounterValueReference, Option<CodeRegion>>, - expressions: IndexVec<InjectedExpressionIndex, Option<Expression>>, + counters: IndexVec<CounterId, Option<CodeRegion>>, + expressions: IndexVec<ExpressionId, Option<Expression>>, unreachable_regions: Vec<CodeRegion>, } @@ -82,48 +80,36 @@ impl<'tcx> FunctionCoverage<'tcx> { } /// Adds a code region to be counted by an injected counter intrinsic. - pub fn add_counter(&mut self, id: CounterValueReference, region: CodeRegion) { + pub fn add_counter(&mut self, id: CounterId, region: CodeRegion) { if let Some(previous_region) = self.counters[id].replace(region.clone()) { assert_eq!(previous_region, region, "add_counter: code region for id changed"); } } /// Both counters and "counter expressions" (or simply, "expressions") can be operands in other - /// expressions. Expression IDs start from `u32::MAX` and go down, so the range of expression - /// IDs will not overlap with the range of counter IDs. Counters and expressions can be added in - /// any order, and expressions can still be assigned contiguous (though descending) IDs, without - /// knowing what the last counter ID will be. - /// - /// When storing the expression data in the `expressions` vector in the `FunctionCoverage` - /// struct, its vector index is computed, from the given expression ID, by subtracting from - /// `u32::MAX`. 
- /// - /// Since the expression operands (`lhs` and `rhs`) can reference either counters or - /// expressions, an operand that references an expression also uses its original ID, descending - /// from `u32::MAX`. Theses operands are translated only during code generation, after all - /// counters and expressions have been added. + /// expressions. These are tracked as separate variants of `Operand`, so there is no ambiguity + /// between operands that are counter IDs and operands that are expression IDs. pub fn add_counter_expression( &mut self, - expression_id: InjectedExpressionId, - lhs: ExpressionOperandId, + expression_id: ExpressionId, + lhs: Operand, op: Op, - rhs: ExpressionOperandId, + rhs: Operand, region: Option<CodeRegion>, ) { debug!( "add_counter_expression({:?}, lhs={:?}, op={:?}, rhs={:?} at {:?}", expression_id, lhs, op, rhs, region ); - let expression_index = self.expression_index(u32::from(expression_id)); debug_assert!( - expression_index.as_usize() < self.expressions.len(), - "expression_index {} is out of range for expressions.len() = {} + expression_id.as_usize() < self.expressions.len(), + "expression_id {} is out of range for expressions.len() = {} for {:?}", - expression_index.as_usize(), + expression_id.as_usize(), self.expressions.len(), self, ); - if let Some(previous_expression) = self.expressions[expression_index].replace(Expression { + if let Some(previous_expression) = self.expressions[expression_id].replace(Expression { lhs, op, rhs, @@ -186,14 +172,11 @@ impl<'tcx> FunctionCoverage<'tcx> { // This closure converts any `Expression` operand (`lhs` or `rhs` of the `Op::Add` or // `Op::Subtract` operation) into its native `llvm::coverage::Counter::CounterKind` type - // and value. Operand ID value `0` maps to `CounterKind::Zero`; values in the known range - // of injected LLVM counters map to `CounterKind::CounterValueReference` (and the value - // matches the injected counter index); and any other value is converted into a - // `CounterKind::Expression` with the expression's `new_index`. + // and value. // // Expressions will be returned from this function in a sequential vector (array) of // `CounterExpression`, so the expression IDs must be mapped from their original, - // potentially sparse set of indexes, originally in reverse order from `u32::MAX`. + // potentially sparse set of indexes. // // An `Expression` as an operand will have already been encountered as an `Expression` with // operands, so its new_index will already have been generated (as a 1-up index value). @@ -206,34 +189,19 @@ impl<'tcx> FunctionCoverage<'tcx> { // `expression_index`s lower than the referencing `Expression`. Therefore, it is // reasonable to look up the new index of an expression operand while the `new_indexes` // vector is only complete up to the current `ExpressionIndex`. - let id_to_counter = |new_indexes: &IndexSlice< - InjectedExpressionIndex, - Option<MappedExpressionIndex>, - >, - id: ExpressionOperandId| { - if id == ExpressionOperandId::ZERO { - Some(Counter::zero()) - } else if id.index() < self.counters.len() { - debug_assert!( - id.index() > 0, - "ExpressionOperandId indexes for counters are 1-based, but this id={}", - id.index() - ); - // Note: Some codegen-injected Counters may be only referenced by `Expression`s, - // and may not have their own `CodeRegion`s, - let index = CounterValueReference::from(id.index()); - // Note, the conversion to LLVM `Counter` adjusts the index to be zero-based. 
- Some(Counter::counter_value_reference(index)) - } else { - let index = self.expression_index(u32::from(id)); + type NewIndexes = IndexSlice<ExpressionId, Option<MappedExpressionIndex>>; + let id_to_counter = |new_indexes: &NewIndexes, operand: Operand| match operand { + Operand::Zero => Some(Counter::zero()), + Operand::Counter(id) => Some(Counter::counter_value_reference(id)), + Operand::Expression(id) => { self.expressions - .get(index) + .get(id) .expect("expression id is out of range") .as_ref() // If an expression was optimized out, assume it would have produced a count // of zero. This ensures that expressions dependent on optimized-out // expressions are still valid. - .map_or(Some(Counter::zero()), |_| new_indexes[index].map(Counter::expression)) + .map_or(Some(Counter::zero()), |_| new_indexes[id].map(Counter::expression)) } }; @@ -340,9 +308,4 @@ impl<'tcx> FunctionCoverage<'tcx> { fn unreachable_regions(&self) -> impl Iterator<Item = (Counter, &CodeRegion)> { self.unreachable_regions.iter().map(|region| (Counter::zero(), region)) } - - fn expression_index(&self, id_descending_from_max: u32) -> InjectedExpressionIndex { - debug_assert!(id_descending_from_max >= self.counters.len() as u32); - InjectedExpressionIndex::from(u32::MAX - id_descending_from_max) - } } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index 9149b06886b..db9ba0abbde 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -1,9 +1,8 @@ use crate::common::CodegenCx; use crate::coverageinfo; -use crate::coverageinfo::map_data::{Counter, CounterExpression}; +use crate::coverageinfo::ffi::{Counter, CounterExpression, CounterMappingRegion}; use crate::llvm; -use llvm::coverageinfo::CounterMappingRegion; use rustc_codegen_ssa::traits::ConstMethods; use rustc_data_structures::fx::FxIndexSet; use rustc_hir::def::DefKind; diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 60cbb3d3d9d..afceb7531e6 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -3,10 +3,10 @@ use crate::llvm; use crate::abi::Abi; use crate::builder::Builder; use crate::common::CodegenCx; -use crate::coverageinfo::map_data::{CounterExpression, FunctionCoverage}; +use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion}; +use crate::coverageinfo::map_data::FunctionCoverage; use libc::c_uint; -use llvm::coverageinfo::CounterMappingRegion; use rustc_codegen_ssa::traits::{ BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods, StaticMethods, @@ -16,9 +16,7 @@ use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_llvm::RustString; use rustc_middle::bug; -use rustc_middle::mir::coverage::{ - CodeRegion, CounterValueReference, CoverageKind, ExpressionOperandId, InjectedExpressionId, Op, -}; +use rustc_middle::mir::coverage::{CodeRegion, CounterId, CoverageKind, ExpressionId, Op, Operand}; use rustc_middle::mir::Coverage; use rustc_middle::ty; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; @@ -29,11 +27,11 @@ use rustc_middle::ty::Ty; use std::cell::RefCell; use std::ffi::CString; -mod ffi; +pub(crate) mod ffi; pub(crate) mod map_data; pub mod mapgen; -const UNUSED_FUNCTION_COUNTER_ID: CounterValueReference = CounterValueReference::START; +const UNUSED_FUNCTION_COUNTER_ID: CounterId = 
CounterId::START; const VAR_ALIGN_BYTES: usize = 8; @@ -125,7 +123,7 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { let fn_name = bx.get_pgo_func_name_var(instance); let hash = bx.const_u64(function_source_hash); let num_counters = bx.const_u32(coverageinfo.num_counters); - let index = bx.const_u32(id.zero_based_index()); + let index = bx.const_u32(id.as_u32()); debug!( "codegen intrinsic instrprof.increment(fn_name={:?}, hash={:?}, num_counters={:?}, index={:?})", fn_name, hash, num_counters, index, @@ -178,7 +176,7 @@ impl<'tcx> Builder<'_, '_, 'tcx> { fn add_coverage_counter( &mut self, instance: Instance<'tcx>, - id: CounterValueReference, + id: CounterId, region: CodeRegion, ) -> bool { if let Some(coverage_context) = self.coverage_context() { @@ -202,10 +200,10 @@ impl<'tcx> Builder<'_, '_, 'tcx> { fn add_coverage_counter_expression( &mut self, instance: Instance<'tcx>, - id: InjectedExpressionId, - lhs: ExpressionOperandId, + id: ExpressionId, + lhs: Operand, op: Op, - rhs: ExpressionOperandId, + rhs: Operand, region: Option<CodeRegion>, ) -> bool { if let Some(coverage_context) = self.coverage_context() { diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index ed962bb3e3c..ef7c661936a 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -17,8 +17,7 @@ use rustc_span::symbol::sym; /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { if needs_gdb_debug_scripts_section(bx) { - let gdb_debug_scripts_section = - bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p()); + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. 
let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 40f0bcfdf58..f8cbcbd5ec8 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -990,14 +990,8 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>( closure_or_generator_di_node: &'ll DIType, ) -> SmallVec<&'ll DIType> { let (&def_id, up_var_tys) = match closure_or_generator_ty.kind() { - ty::Generator(def_id, args, _) => { - let upvar_tys: SmallVec<_> = args.as_generator().prefix_tys().collect(); - (def_id, upvar_tys) - } - ty::Closure(def_id, args) => { - let upvar_tys: SmallVec<_> = args.as_closure().upvar_tys().collect(); - (def_id, upvar_tys) - } + ty::Generator(def_id, args, _) => (def_id, args.as_generator().prefix_tys()), + ty::Closure(def_id, args) => (def_id, args.as_closure().upvar_tys()), _ => { bug!( "build_upvar_field_di_nodes() called with non-closure-or-generator-type: {:?}", @@ -1007,9 +1001,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>( }; debug_assert!( - up_var_tys - .iter() - .all(|&t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t)) + up_var_tys.iter().all(|t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t)) ); let capture_names = cx.tcx.closure_saved_names_of_captured_variables(def_id); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs index b4beb80ca8b..d3239d5c358 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs @@ -379,6 +379,7 @@ pub fn build_generator_variant_struct_type_di_node<'ll, 'tcx>( // Fields that are common to all states let common_fields: SmallVec<_> = generator_args .prefix_tys() + .iter() .zip(common_upvar_names) .enumerate() .map(|(index, (upvar_ty, upvar_name))| { diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 43254bb4de8..a9b06030e70 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -167,7 +167,6 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { let ptr = args[0].immediate(); let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { let llty = ty.llvm_type(self); - let ptr = self.pointercast(ptr, self.type_ptr_to(llty)); self.volatile_load(llty, ptr) } else { self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr) @@ -317,18 +316,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.const_bool(true) } else if use_integer_compare { let integer_ty = self.type_ix(layout.size().bits()); - let ptr_ty = self.type_ptr_to(integer_ty); - let a_ptr = self.bitcast(a, ptr_ty); - let a_val = self.load(integer_ty, a_ptr, layout.align().abi); - let b_ptr = self.bitcast(b, ptr_ty); - let b_val = self.load(integer_ty, b_ptr, layout.align().abi); + let a_val = self.load(integer_ty, a, layout.align().abi); + let b_val = self.load(integer_ty, b, layout.align().abi); self.icmp(IntPredicate::IntEQ, a_val, b_val) } else { - let i8p_ty = self.type_i8p(); - let a_ptr = self.bitcast(a, i8p_ty); - let b_ptr = self.bitcast(b, i8p_ty); let n = self.const_usize(layout.size().bytes()); - let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]); + let cmp = self.call_intrinsic("memcmp", 
&[a, b, n]); match self.cx.sess().target.arch.as_ref() { "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)), _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)), @@ -336,6 +329,16 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { } } + sym::compare_bytes => { + // Here we assume that the `memcmp` provided by the target is a NOP for size 0. + let cmp = self.call_intrinsic( + "memcmp", + &[args[0].immediate(), args[1].immediate(), args[2].immediate()], + ); + // Some targets have `memcmp` returning `i16`, but the intrinsic is always `i32`. + self.sext(cmp, self.type_ix(32)) + } + sym::black_box => { args[0].val.store(self, result); let result_val_span = [result.llval]; @@ -383,10 +386,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { }; if !fn_abi.ret.is_ignore() { - if let PassMode::Cast(ty, _) = &fn_abi.ret.mode { - let ptr_llty = self.type_ptr_to(ty.llvm_type(self)); - let ptr = self.pointercast(result.llval, ptr_llty); - self.store(llval, ptr, result.align); + if let PassMode::Cast(_, _) = &fn_abi.ret.mode { + self.store(llval, result.llval, result.align); } else { OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) .val @@ -410,9 +411,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value { // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time // optimization pass replaces calls to this intrinsic with code to test type membership. - let i8p_ty = self.type_i8p(); - let bitcast = self.bitcast(pointer, i8p_ty); - self.call_intrinsic("llvm.type.test", &[bitcast, typeid]) + self.call_intrinsic("llvm.type.test", &[pointer, typeid]) } fn type_checked_load( @@ -444,7 +443,7 @@ fn try_intrinsic<'ll>( dest: &'ll Value, ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.call(try_func_ty, None, None, try_func, &[data], None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. @@ -544,8 +543,8 @@ fn codegen_msvc_try<'ll>( // // More information can be found in libstd's seh.rs implementation. let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(bx.type_i8p(), ptr_align); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let slot = bx.alloca(bx.type_ptr(), ptr_align); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None); bx.switch_to_block(normal); @@ -568,10 +567,10 @@ fn codegen_msvc_try<'ll>( // // When modifying, make sure that the type_name string exactly matches // the one used in library/panic_unwind/src/seh.rs. 
- let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p()); + let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr()); let type_name = bx.const_bytes(b"rust_panic\0"); let type_info = - bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false); + bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false); let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info)); unsafe { llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage); @@ -588,15 +587,15 @@ fn codegen_msvc_try<'ll>( bx.switch_to_block(catchpad_rust); let flags = bx.const_i32(8); let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]); - let ptr = bx.load(bx.type_i8p(), slot, ptr_align); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let ptr = bx.load(bx.type_ptr(), slot, ptr_align); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet)); bx.catch_ret(&funclet, caught); // The flag value of 64 indicates a "catch-all". bx.switch_to_block(catchpad_foreign); let flags = bx.const_i32(64); - let null = bx.const_null(bx.type_i8p()); + let null = bx.const_null(bx.type_ptr()); let funclet = bx.catch_pad(cs, &[null, flags, null]); bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet)); bx.catch_ret(&funclet, caught); @@ -655,7 +654,7 @@ fn codegen_wasm_try<'ll>( // ret i32 1 // } // - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None); bx.switch_to_block(normal); @@ -665,13 +664,13 @@ fn codegen_wasm_try<'ll>( let cs = bx.catch_switch(None, None, &[catchpad]); bx.switch_to_block(catchpad); - let null = bx.const_null(bx.type_i8p()); + let null = bx.const_null(bx.type_ptr()); let funclet = bx.catch_pad(cs, &[null]); let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]); let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet)); bx.catch_ret(&funclet, caught); @@ -723,7 +722,7 @@ fn codegen_gnu_try<'ll>( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); @@ -736,12 +735,12 @@ fn codegen_gnu_try<'ll>( // the landing pad clauses the exception's type had been matched to. // rust_try ignores the selector. 
bx.switch_to_block(catch); - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1); - let tydesc = bx.const_null(bx.type_i8p()); + let tydesc = bx.const_null(bx.type_ptr()); bx.add_clause(vals, tydesc); let ptr = bx.extract_value(vals, 0); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, ptr], None); bx.ret(bx.const_i32(1)); }); @@ -787,7 +786,7 @@ fn codegen_emcc_try<'ll>( let try_func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); let catch_func = llvm::get_param(bx.llfn(), 2); - let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void()); + let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None); bx.switch_to_block(then); @@ -800,10 +799,10 @@ fn codegen_emcc_try<'ll>( // the landing pad clauses the exception's type had been matched to. bx.switch_to_block(catch); let tydesc = bx.eh_catch_typeinfo(); - let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false); + let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false); let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2); bx.add_clause(vals, tydesc); - bx.add_clause(vals, bx.const_null(bx.type_i8p())); + bx.add_clause(vals, bx.const_null(bx.type_ptr())); let ptr = bx.extract_value(vals, 0); let selector = bx.extract_value(vals, 1); @@ -816,7 +815,7 @@ fn codegen_emcc_try<'ll>( // create an alloca and pass a pointer to that. let ptr_align = bx.tcx().data_layout.pointer_align.abi; let i8_align = bx.tcx().data_layout.i8_align.abi; - let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false); + let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false); let catch_data = bx.alloca(catch_data_type, ptr_align); let catch_data_0 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]); @@ -824,9 +823,8 @@ fn codegen_emcc_try<'ll>( let catch_data_1 = bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]); bx.store(is_rust_panic, catch_data_1, i8_align); - let catch_data = bx.bitcast(catch_data, bx.type_i8p()); - let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void()); + let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None); bx.ret(bx.const_i32(1)); }); @@ -967,8 +965,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let place = PlaceRef::alloca(bx, args[0].layout); args[0].val.store(bx, place); let int_ty = bx.type_ix(expected_bytes * 8); - let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty)); - bx.load(int_ty, ptr, Align::ONE) + bx.load(int_ty, place.llval, Align::ONE) } _ => return_error!(InvalidMonomorphization::InvalidBitmask { span, @@ -1033,28 +1030,20 @@ fn generic_simd_intrinsic<'ll, 'tcx>( )); } - if let Some(stripped) = name.as_str().strip_prefix("simd_shuffle") { - // If this intrinsic is the older "simd_shuffleN" form, simply parse the integer. - // If there is no suffix, use the index array length. - let n: u64 = if stripped.is_empty() { - // Make sure this is actually an array, since typeck only checks the length-suffixed - // version of this intrinsic. 
- match args[2].layout.ty.kind() { - ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { - len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else( - || span_bug!(span, "could not evaluate shuffle index array length"), - ) - } - _ => return_error!(InvalidMonomorphization::SimdShuffle { - span, - name, - ty: args[2].layout.ty - }), + if name == sym::simd_shuffle { + // Make sure this is actually an array, since typeck only checks the length-suffixed + // version of this intrinsic. + let n: u64 = match args[2].layout.ty.kind() { + ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { + len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else( + || span_bug!(span, "could not evaluate shuffle index array length"), + ) } - } else { - stripped.parse().unwrap_or_else(|_| { - span_bug!(span, "bad `simd_shuffle` instruction only caught in codegen?") - }) + _ => return_error!(InvalidMonomorphization::SimdShuffle { + span, + name, + ty: args[2].layout.ty + }), }; require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); @@ -1217,7 +1206,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE); bx.store(ze, ptr, Align::ONE); let array_ty = bx.type_array(bx.type_i8(), expected_bytes); - let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty)); return Ok(bx.load(array_ty, ptr, Align::ONE)); } _ => return_error!(InvalidMonomorphization::CannotReturn { @@ -1321,50 +1309,34 @@ fn generic_simd_intrinsic<'ll, 'tcx>( // FIXME: use: // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182 // https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81 - fn llvm_vector_str( - elem_ty: Ty<'_>, - vec_len: u64, - no_pointers: usize, - bx: &Builder<'_, '_, '_>, - ) -> String { - let p0s: String = "p0".repeat(no_pointers); + fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String { match *elem_ty.kind() { ty::Int(v) => format!( - "v{}{}i{}", + "v{}i{}", vec_len, - p0s, // Normalize to prevent crash if v: IntTy::Isize v.normalize(bx.target_spec().pointer_width).bit_width().unwrap() ), ty::Uint(v) => format!( - "v{}{}i{}", + "v{}i{}", vec_len, - p0s, // Normalize to prevent crash if v: UIntTy::Usize v.normalize(bx.target_spec().pointer_width).bit_width().unwrap() ), - ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()), + ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()), + ty::RawPtr(_) => format!("v{}p0", vec_len), _ => unreachable!(), } } - fn llvm_vector_ty<'ll>( - cx: &CodegenCx<'ll, '_>, - elem_ty: Ty<'_>, - vec_len: u64, - mut no_pointers: usize, - ) -> &'ll Type { - // FIXME: use cx.layout_of(ty).llvm_type() ? 
- let mut elem_ty = match *elem_ty.kind() { + fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type { + let elem_ty = match *elem_ty.kind() { ty::Int(v) => cx.type_int_from_ty(v), ty::Uint(v) => cx.type_uint_from_ty(v), ty::Float(v) => cx.type_float_from_ty(v), + ty::RawPtr(_) => cx.type_ptr(), _ => unreachable!(), }; - while no_pointers > 0 { - elem_ty = cx.type_ptr_to(elem_ty); - no_pointers -= 1; - } cx.type_vector(elem_ty, vec_len) } @@ -1419,47 +1391,26 @@ fn generic_simd_intrinsic<'ll, 'tcx>( InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty } ); - // This counts how many pointers - fn ptr_count(t: Ty<'_>) -> usize { - match t.kind() { - ty::RawPtr(p) => 1 + ptr_count(p.ty), - _ => 0, - } - } - - // Non-ptr type - fn non_ptr(t: Ty<'_>) -> Ty<'_> { - match t.kind() { - ty::RawPtr(p) => non_ptr(p.ty), - _ => t, - } - } - // The second argument must be a simd vector with an element type that's a pointer // to the element type of the first argument let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (pointer_count, underlying_ty) = match element_ty1.kind() { - ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)), - _ => { - require!( - false, - InvalidMonomorphization::ExpectedElementType { - span, - name, - expected_element: element_ty1, - second_arg: arg_tys[1], - in_elem, - in_ty, - mutability: ExpectedPointerMutability::Not, - } - ); - unreachable!(); + + require!( + matches!( + element_ty1.kind(), + ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind() + ), + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Not, } - }; - assert!(pointer_count > 0); - assert_eq!(pointer_count - 1, ptr_count(element_ty0)); - assert_eq!(underlying_ty, non_ptr(element_ty0)); + ); // The element type of the third argument must be a signed integer type of any width: let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); @@ -1490,12 +1441,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>( }; // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count); - let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx); + let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len); + let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); - let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx); + let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len); + let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len); let llvm_intrinsic = format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}"); @@ -1559,50 +1510,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } ); - // This counts how many pointers - fn ptr_count(t: Ty<'_>) -> usize { - match t.kind() { - ty::RawPtr(p) => 1 + ptr_count(p.ty), - _ => 0, - } - } - - // Non-ptr type - fn non_ptr(t: Ty<'_>) -> Ty<'_> { - match t.kind() { - ty::RawPtr(p) => non_ptr(p.ty), - _ => t, - } - } - // The second argument must be a simd vector with an element type that's a pointer // to the element type of the first argument let (_, element_ty0) = 
arg_tys[0].simd_size_and_type(bx.tcx()); let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); - let (pointer_count, underlying_ty) = match element_ty1.kind() { - ty::RawPtr(p) if p.ty == in_elem && p.mutbl.is_mut() => { - (ptr_count(element_ty1), non_ptr(element_ty1)) - } - _ => { - require!( - false, - InvalidMonomorphization::ExpectedElementType { - span, - name, - expected_element: element_ty1, - second_arg: arg_tys[1], - in_elem, - in_ty, - mutability: ExpectedPointerMutability::Mut, - } - ); - unreachable!(); + + require!( + matches!( + element_ty1.kind(), + ty::RawPtr(p) + if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind() + ), + InvalidMonomorphization::ExpectedElementType { + span, + name, + expected_element: element_ty1, + second_arg: arg_tys[1], + in_elem, + in_ty, + mutability: ExpectedPointerMutability::Mut, } - }; - assert!(pointer_count > 0); - assert_eq!(pointer_count - 1, ptr_count(element_ty0)); - assert_eq!(underlying_ty, non_ptr(element_ty0)); + ); // The element type of the third argument must be a signed integer type of any width: match element_ty2.kind() { @@ -1634,12 +1563,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ret_t = bx.type_void(); // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count); - let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx); + let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len); + let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1); - let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx); + let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len); + let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len); let llvm_intrinsic = format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}"); @@ -1857,11 +1786,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } } - if in_elem == out_elem { - return Ok(args[0].immediate()); - } else { - return Ok(bx.pointercast(args[0].immediate(), llret_ty)); - } + return Ok(args[0].immediate()); } if name == sym::simd_expose_addr { @@ -2096,29 +2021,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>( sym::simd_cttz => "cttz", _ => unreachable!(), }; - let llvm_intrinsic = &format!( - "llvm.{}.v{}i{}", - intrinsic_name, - in_len, - in_elem.int_size_and_signed(bx.tcx()).0.bits(), - ); + let int_size = in_elem.int_size_and_signed(bx.tcx()).0.bits(); + let llvm_intrinsic = &format!("llvm.{}.v{}i{}", intrinsic_name, in_len, int_size,); - return Ok(if matches!(name, sym::simd_ctlz | sym::simd_cttz) { + return if name == sym::simd_bswap && int_size == 8 { + // byte swap is no-op for i8/u8 + Ok(args[0].immediate()) + } else if matches!(name, sym::simd_ctlz | sym::simd_cttz) { let fn_ty = bx.type_func(&[vec_ty, bx.type_i1()], vec_ty); let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); - bx.call( + Ok(bx.call( fn_ty, None, None, f, &[args[0].immediate(), bx.const_int(bx.type_i1(), 0)], None, - ) + )) } else { let fn_ty = bx.type_func(&[vec_ty], vec_ty); let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty); - bx.call(fn_ty, None, None, f, &[args[0].immediate()], None) - }); + Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None)) + }; } if name == sym::simd_arith_offset { diff --git 
a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index bb264db22d9..d283299ac46 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -28,7 +28,7 @@ pub use llvm_util::target_features; use rustc_ast::expand::allocator::AllocatorKind; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule}; use rustc_codegen_ssa::back::write::{ - CodegenContext, FatLTOInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn, + CodegenContext, FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn, }; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::ModuleCodegen; @@ -141,18 +141,6 @@ impl ExtraBackendMethods for LlvmCodegenBackend { back::write::target_machine_factory(sess, optlvl, target_features) } - fn spawn_thread<F, T>(time_trace: bool, f: F) -> std::thread::JoinHandle<T> - where - F: FnOnce() -> T, - F: Send + 'static, - T: Send + 'static, - { - std::thread::spawn(move || { - let _profiler = TimeTraceProfiler::new(time_trace); - f() - }) - } - fn spawn_named_thread<F, T>( time_trace: bool, name: String, @@ -212,7 +200,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { } fn run_fat_lto( cgcx: &CodegenContext<Self>, - modules: Vec<FatLTOInput<Self>>, + modules: Vec<FatLtoInput<Self>>, cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, ) -> Result<LtoModuleCodegen<Self>, FatalError> { back::lto::run_fat(cgcx, modules, cached_modules) diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs index 45de284d22a..06e846a2b45 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs @@ -9,7 +9,7 @@ use libc::c_uint; use super::{DiagnosticInfo, SMDiagnostic}; use rustc_span::InnerSpan; -#[derive(Copy, Clone)] +#[derive(Copy, Clone, Debug)] pub enum OptimizationDiagnosticKind { OptimizationRemark, OptimizationMissed, diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index 5fe6407ac4f..112b4a173a5 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1,8 +1,6 @@ #![allow(non_camel_case_types)] #![allow(non_upper_case_globals)] -use crate::coverageinfo::map_data as coverage_map; - use super::debuginfo::{ DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator, DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace, @@ -477,6 +475,8 @@ pub enum OptStage { pub struct SanitizerOptions { pub sanitize_address: bool, pub sanitize_address_recover: bool, + pub sanitize_cfi: bool, + pub sanitize_kcfi: bool, pub sanitize_memory: bool, pub sanitize_memory_recover: bool, pub sanitize_memory_track_origins: c_int, @@ -688,204 +688,6 @@ extern "C" { pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void); pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint); -pub mod coverageinfo { - use super::coverage_map; - - /// Corresponds to enum `llvm::coverage::CounterMappingRegion::RegionKind`. - /// - /// Must match the layout of `LLVMRustCounterMappingRegionKind`. 
- #[derive(Copy, Clone, Debug)] - #[repr(C)] - pub enum RegionKind { - /// A CodeRegion associates some code with a counter - CodeRegion = 0, - - /// An ExpansionRegion represents a file expansion region that associates - /// a source range with the expansion of a virtual source file, such as - /// for a macro instantiation or #include file. - ExpansionRegion = 1, - - /// A SkippedRegion represents a source range with code that was skipped - /// by a preprocessor or similar means. - SkippedRegion = 2, - - /// A GapRegion is like a CodeRegion, but its count is only set as the - /// line execution count when its the only region in the line. - GapRegion = 3, - - /// A BranchRegion represents leaf-level boolean expressions and is - /// associated with two counters, each representing the number of times the - /// expression evaluates to true or false. - BranchRegion = 4, - } - - /// This struct provides LLVM's representation of a "CoverageMappingRegion", encoded into the - /// coverage map, in accordance with the - /// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format). - /// The struct composes fields representing the `Counter` type and value(s) (injected counter - /// ID, or expression type and operands), the source file (an indirect index into a "filenames - /// array", encoded separately), and source location (start and end positions of the represented - /// code region). - /// - /// Corresponds to struct `llvm::coverage::CounterMappingRegion`. - /// - /// Must match the layout of `LLVMRustCounterMappingRegion`. - #[derive(Copy, Clone, Debug)] - #[repr(C)] - pub struct CounterMappingRegion { - /// The counter type and type-dependent counter data, if any. - counter: coverage_map::Counter, - - /// If the `RegionKind` is a `BranchRegion`, this represents the counter - /// for the false branch of the region. - false_counter: coverage_map::Counter, - - /// An indirect reference to the source filename. In the LLVM Coverage Mapping Format, the - /// file_id is an index into a function-specific `virtual_file_mapping` array of indexes - /// that, in turn, are used to look up the filename for this region. - file_id: u32, - - /// If the `RegionKind` is an `ExpansionRegion`, the `expanded_file_id` can be used to find - /// the mapping regions created as a result of macro expansion, by checking if their file id - /// matches the expanded file id. - expanded_file_id: u32, - - /// 1-based starting line of the mapping region. - start_line: u32, - - /// 1-based starting column of the mapping region. - start_col: u32, - - /// 1-based ending line of the mapping region. - end_line: u32, - - /// 1-based ending column of the mapping region. If the high bit is set, the current - /// mapping region is a gap area. - end_col: u32, - - kind: RegionKind, - } - - impl CounterMappingRegion { - pub(crate) fn code_region( - counter: coverage_map::Counter, - file_id: u32, - start_line: u32, - start_col: u32, - end_line: u32, - end_col: u32, - ) -> Self { - Self { - counter, - false_counter: coverage_map::Counter::zero(), - file_id, - expanded_file_id: 0, - start_line, - start_col, - end_line, - end_col, - kind: RegionKind::CodeRegion, - } - } - - // This function might be used in the future; the LLVM API is still evolving, as is coverage - // support. 
- #[allow(dead_code)] - pub(crate) fn branch_region( - counter: coverage_map::Counter, - false_counter: coverage_map::Counter, - file_id: u32, - start_line: u32, - start_col: u32, - end_line: u32, - end_col: u32, - ) -> Self { - Self { - counter, - false_counter, - file_id, - expanded_file_id: 0, - start_line, - start_col, - end_line, - end_col, - kind: RegionKind::BranchRegion, - } - } - - // This function might be used in the future; the LLVM API is still evolving, as is coverage - // support. - #[allow(dead_code)] - pub(crate) fn expansion_region( - file_id: u32, - expanded_file_id: u32, - start_line: u32, - start_col: u32, - end_line: u32, - end_col: u32, - ) -> Self { - Self { - counter: coverage_map::Counter::zero(), - false_counter: coverage_map::Counter::zero(), - file_id, - expanded_file_id, - start_line, - start_col, - end_line, - end_col, - kind: RegionKind::ExpansionRegion, - } - } - - // This function might be used in the future; the LLVM API is still evolving, as is coverage - // support. - #[allow(dead_code)] - pub(crate) fn skipped_region( - file_id: u32, - start_line: u32, - start_col: u32, - end_line: u32, - end_col: u32, - ) -> Self { - Self { - counter: coverage_map::Counter::zero(), - false_counter: coverage_map::Counter::zero(), - file_id, - expanded_file_id: 0, - start_line, - start_col, - end_line, - end_col, - kind: RegionKind::SkippedRegion, - } - } - - // This function might be used in the future; the LLVM API is still evolving, as is coverage - // support. - #[allow(dead_code)] - pub(crate) fn gap_region( - counter: coverage_map::Counter, - file_id: u32, - start_line: u32, - start_col: u32, - end_line: u32, - end_col: u32, - ) -> Self { - Self { - counter, - false_counter: coverage_map::Counter::zero(), - file_id, - expanded_file_id: 0, - start_line, - start_col, - end_line, - end_col: (1_u32 << 31) | end_col, - kind: RegionKind::GapRegion, - } - } - } -} - pub mod debuginfo { use super::{InvariantOpaque, Metadata}; use bitflags::bitflags; @@ -1073,7 +875,7 @@ extern "C" { // Operations on array, pointer, and vector types (sequence types) pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type; - pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type; + pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type; pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type; pub fn LLVMGetElementType(Ty: &Type) -> &Type; @@ -1094,6 +896,7 @@ extern "C" { pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata); pub fn LLVMValueAsMetadata(Node: &Value) -> &Metadata; pub fn LLVMIsAFunction(Val: &Value) -> Option<&Value>; + pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool; // Operations on constants of any type pub fn LLVMConstNull(Ty: &Type) -> &Value; @@ -1911,9 +1714,9 @@ extern "C" { pub fn LLVMRustCoverageWriteMappingToBuffer( VirtualFileMappingIDs: *const c_uint, NumVirtualFileMappingIDs: c_uint, - Expressions: *const coverage_map::CounterExpression, + Expressions: *const crate::coverageinfo::ffi::CounterExpression, NumExpressions: c_uint, - MappingRegions: *const coverageinfo::CounterMappingRegion, + MappingRegions: *const crate::coverageinfo::ffi::CounterMappingRegion, NumMappingRegions: c_uint, BufferOut: &RustString, ); @@ -2338,6 +2141,7 @@ extern "C" { TM: &'a TargetMachine, OptLevel: PassBuilderOptLevel, OptStage: OptStage, + IsLinkerPluginLTO: bool, NoPrepopulatePasses: bool, VerifyIR: bool, UseThinLTOBuffers: bool, @@ -2532,6 +2336,7 @@ 
extern "C" { remark_passes: *const *const c_char, remark_passes_len: usize, remark_file: *const c_char, + pgo_available: bool, ); #[allow(improper_ctypes)] diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index 7e672a8dc33..8db6195d931 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -112,12 +112,6 @@ impl<'ll> CodegenCx<'ll, '_> { } } - pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. - let ity = Integer::approximate_align(self, align); - self.type_from_integer(ity) - } - /// Return a LLVM type that has at most the required alignment, /// and exactly the required size, as a best-effort padding array. pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type { @@ -189,17 +183,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() } } - fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { - assert_ne!( - self.type_kind(ty), - TypeKind::Function, - "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense" - ); - ty.ptr_to(AddressSpace::DATA) + fn type_ptr(&self) -> &'ll Type { + self.type_ptr_ext(AddressSpace::DATA) } - fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type { - ty.ptr_to(address_space) + fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type { + unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) } } fn element_type(&self, ty: &'ll Type) -> &'ll Type { @@ -247,12 +236,8 @@ impl Type { unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) } } - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA) - } - - fn ptr_to(&self, address_space: AddressSpace) -> &Type { - unsafe { llvm::LLVMPointerType(self, address_space.0) } + pub fn ptr_llcx(llcx: &llvm::Context) -> &Type { + unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) } } } diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 2dbd467cc84..2be7bce115d 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>( match layout.abi { Abi::Scalar(_) => bug!("handled elsewhere"), Abi::Vector { element, count } => { - let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); + let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } Abi::ScalarPair(..) => { @@ -61,9 +61,6 @@ fn uncached_llvm_type<'a, 'tcx>( } Some(name) } - // Use identified structure types for ADT. Due to pointee types in LLVM IR their definition - // might be recursive. Other cases are non-recursive and we can use literal structure types. - ty::Adt(..) 
=> Some(String::new()), _ => None, }; @@ -179,12 +176,7 @@ pub trait LayoutLlvmExt<'tcx> { fn is_llvm_scalar_pair(&self) -> bool; fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; - fn scalar_llvm_type_at<'a>( - &self, - cx: &CodegenCx<'a, 'tcx>, - scalar: Scalar, - offset: Size, - ) -> &'a Type; + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type; fn scalar_pair_element_llvm_type<'a>( &self, cx: &CodegenCx<'a, 'tcx>, @@ -230,16 +222,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { return llty; } let llty = match *self.ty.kind() { - ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) - } - ty::Adt(def, _) if def.is_box() => { - cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) - } + ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(), + ty::Adt(def, _) if def.is_box() => cx.type_ptr(), ty::FnPtr(sig) => { cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty())) } - _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO), + _ => self.scalar_llvm_type_at(cx, scalar), }; cx.scalar_lltypes.borrow_mut().insert(self.ty, llty); return llty; @@ -300,25 +288,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { self.llvm_type(cx) } - fn scalar_llvm_type_at<'a>( - &self, - cx: &CodegenCx<'a, 'tcx>, - scalar: Scalar, - offset: Size, - ) -> &'a Type { + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type { match scalar.primitive() { Int(i, _) => cx.type_from_integer(i), F32 => cx.type_f32(), F64 => cx.type_f64(), - Pointer(address_space) => { - // If we know the alignment, pick something better than i8. - let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - cx.type_pointee_for_align(pointee.align) - } else { - cx.type_i8() - }; - cx.type_ptr_to_ext(pointee, address_space) - } + Pointer(address_space) => cx.type_ptr_ext(address_space), } } @@ -364,8 +339,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { return cx.type_i1(); } - let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) }; - self.scalar_llvm_type_at(cx, scalar, offset) + self.scalar_llvm_type_at(cx, scalar) } fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 { diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 8800caa71d6..172c66a7af1 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -5,7 +5,7 @@ use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandRef; use rustc_codegen_ssa::{ common::IntPredicate, - traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods}, + traits::{BaseTypeMethods, BuilderMethods, ConstMethods}, }; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; use rustc_middle::ty::Ty; @@ -26,24 +26,18 @@ fn round_pointer_up_to_alignment<'ll>( fn emit_direct_ptr_va_arg<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, list: OperandRef<'tcx, &'ll Value>, - llty: &'ll Type, size: Size, align: Align, slot_size: Align, allow_higher_align: bool, ) -> (&'ll Value, Align) { - let va_list_ty = bx.type_i8p(); - let va_list_ptr_ty = bx.type_ptr_to(va_list_ty); - let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty { - bx.bitcast(list.immediate(), va_list_ptr_ty) - } else { - list.immediate() - }; + let va_list_ty = bx.type_ptr(); + let va_list_addr 
= list.immediate(); let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi); let (addr, addr_align) = if allow_higher_align && align > slot_size { - (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align) + (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align) } else { (ptr, slot_size) }; @@ -56,9 +50,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]); - (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align) + (adjusted, addr_align) } else { - (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align) + (addr, addr_align) } } @@ -81,7 +75,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>( (layout.llvm_type(bx.cx), layout.size, layout.align) }; let (addr, addr_align) = - emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align); + emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align); if indirect { let tmp_ret = bx.load(llty, addr, addr_align); bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi) @@ -146,7 +140,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( bx.cond_br(use_stack, on_stack, in_reg); bx.switch_to_block(in_reg); - let top_type = bx.type_i8p(); + let top_type = bx.type_ptr(); let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index); let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi); @@ -158,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]); } let reg_type = layout.llvm_type(bx); - let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type)); let reg_value = bx.load(reg_type, reg_addr, layout.align.abi); bx.br(end); @@ -218,7 +211,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>( // Work out the address of the value in the register save area. let reg_ptr = bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3)); - let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi); + let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi); let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8)); let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding)); let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]); @@ -234,7 +227,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>( // Work out the address of the value in the argument overflow area. let arg_ptr = bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2)); - let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi); + let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi); let arg_off = bx.const_u64(padding); let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]); @@ -246,14 +239,12 @@ fn emit_s390x_va_arg<'ll, 'tcx>( // Return the appropriate result. 
bx.switch_to_block(end); - let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]); + let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]); let val_type = layout.llvm_type(bx); let val_addr = if indirect { - let ptr_type = bx.cx.type_ptr_to(val_type); - let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type)); - bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi) + bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi) } else { - bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type)) + val_addr }; bx.load(val_type, val_addr, layout.align.abi) }
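
Note on the `compare_bytes` hunk in intrinsic.rs above: the intrinsic is lowered to the target's `memcmp` and the result is sign-extended to `i32`, since a few targets (the avr/msp430 arm of the arch match at the top of this section) declare `memcmp` as returning `i16`; the lowering also assumes `memcmp` is a no-op for size 0. The following is a rough standalone model of the semantics being relied on, a hypothetical helper rather than the intrinsic or any compiler code; the real `memcmp` may return any negative or positive value, not just -1/1.

    use std::cmp::Ordering;

    // Model of what the lowering assumes: 0 for equal prefixes (including n == 0),
    // negative/positive otherwise. Caller must guarantee n <= a.len() and n <= b.len().
    fn compare_bytes_model(a: &[u8], b: &[u8], n: usize) -> i32 {
        for i in 0..n {
            match a[i].cmp(&b[i]) {
                Ordering::Less => return -1,
                Ordering::Greater => return 1,
                Ordering::Equal => {}
            }
        }
        0 // n == 0 falls straight through: the "NOP for size 0" assumption
    }

    fn main() {
        assert_eq!(compare_bytes_model(b"abc", b"abc", 3), 0);
        assert!(compare_bytes_model(b"abc", b"abd", 3) < 0);
        assert_eq!(compare_bytes_model(b"", b"", 0), 0);
    }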
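
Note on the masked gather/scatter hunks in intrinsic.rs above: with opaque pointers the vector-of-pointers suffix in the intrinsic name is always `v{len}p0`, so `llvm_vector_str` no longer needs a `no_pointers` count or the repeated `p0` prefix, and the element type of the pointee is no longer encoded at all (only the address space remains). A standalone sketch of that naming scheme follows; the enum and helper names are illustrative only, not the compiler's.

    // Illustrative mirror of the new suffix construction.
    enum Elem {
        Int(u32),   // bit width
        Float(u32), // bit width
        Ptr,        // opaque pointer in address space 0
    }

    fn vector_suffix(elem: Elem, len: u64) -> String {
        match elem {
            Elem::Int(bits) => format!("v{len}i{bits}"),
            Elem::Float(bits) => format!("v{len}f{bits}"),
            Elem::Ptr => format!("v{len}p0"),
        }
    }

    fn main() {
        let elems = vector_suffix(Elem::Int(32), 4);  // "v4i32"
        let ptrs = vector_suffix(Elem::Ptr, 4);       // "v4p0"
        println!("llvm.masked.gather.{elems}.{ptrs}"); // llvm.masked.gather.v4i32.v4p0

        let f64s = vector_suffix(Elem::Float(64), 2); // "v2f64"
        let ptrs2 = vector_suffix(Elem::Ptr, 2);      // "v2p0"
        println!("llvm.masked.scatter.{f64s}.{ptrs2}"); // llvm.masked.scatter.v2f64.v2p0
    }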
