Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
 compiler/rustc_codegen_llvm/src/abi.rs                  | 134
 compiler/rustc_codegen_llvm/src/asm.rs                  |   2
 compiler/rustc_codegen_llvm/src/back/lto.rs             |   2
 compiler/rustc_codegen_llvm/src/back/write.rs           |   3
 compiler/rustc_codegen_llvm/src/builder.rs              |  58
 compiler/rustc_codegen_llvm/src/common.rs               |   4
 compiler/rustc_codegen_llvm/src/context.rs              |  46
 compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs  |  29
 compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs     |   8
 compiler/rustc_codegen_llvm/src/debuginfo/mod.rs        |   6
 compiler/rustc_codegen_llvm/src/declare.rs              |  39
 compiler/rustc_codegen_llvm/src/intrinsic.rs            |  12
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs             |  11
 compiler/rustc_codegen_llvm/src/type_.rs                |   3
 compiler/rustc_codegen_llvm/src/type_of.rs              |  42
15 files changed, 194 insertions, 205 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs
index e5f5146fac8..c0b43b77897 100644
--- a/compiler/rustc_codegen_llvm/src/abi.rs
+++ b/compiler/rustc_codegen_llvm/src/abi.rs
@@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;
 
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
-use rustc_codegen_ssa::mir::place::PlaceRef;
+use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue};
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_middle::bug;
@@ -16,13 +16,15 @@ pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
 use rustc_middle::ty::Ty;
 use rustc_session::config;
 pub use rustc_target::abi::call::*;
-use rustc_target::abi::{self, HasDataLayout, Int};
+use rustc_target::abi::{self, HasDataLayout, Int, Size};
 pub use rustc_target::spec::abi::Abi;
 use rustc_target::spec::SanitizerSet;
 
 use libc::c_uint;
 use smallvec::SmallVec;
 
+use std::cmp;
+
 pub trait ArgAttributesExt {
     fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value);
     fn apply_attrs_to_callsite(
@@ -130,42 +132,39 @@ impl LlvmType for Reg {
 impl LlvmType for CastTarget {
     fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
         let rest_ll_unit = self.rest.unit.llvm_type(cx);
-        let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
-            (0, 0)
+        let rest_count = if self.rest.total == Size::ZERO {
+            0
         } else {
-            (
-                self.rest.total.bytes() / self.rest.unit.size.bytes(),
-                self.rest.total.bytes() % self.rest.unit.size.bytes(),
-            )
+            assert_ne!(
+                self.rest.unit.size,
+                Size::ZERO,
+                "total size {:?} cannot be divided into units of zero size",
+                self.rest.total
+            );
+            if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 {
+                assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split");
+            }
+            self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes())
         };
 
+        // Simplify to a single unit or an array if there's no prefix.
+        // This produces the same layout, but using a simpler type.
         if self.prefix.iter().all(|x| x.is_none()) {
-            // Simplify to a single unit when there is no prefix and size <= unit size
-            if self.rest.total <= self.rest.unit.size {
+            // We can't do this if is_consecutive is set and the unit would get
+            // split on the target. Currently, this is only relevant for i128
+            // registers.
+            if rest_count == 1 && (!self.rest.is_consecutive || self.rest.unit != Reg::i128()) {
                 return rest_ll_unit;
             }
 
-            // Simplify to array when all chunks are the same size and type
-            if rem_bytes == 0 {
-                return cx.type_array(rest_ll_unit, rest_count);
-            }
-        }
-
-        // Create list of fields in the main structure
-        let mut args: Vec<_> = self
-            .prefix
-            .iter()
-            .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)))
-            .chain((0..rest_count).map(|_| rest_ll_unit))
-            .collect();
-
-        // Append final integer
-        if rem_bytes != 0 {
-            // Only integers can be really split further.
-            assert_eq!(self.rest.unit.kind, RegKind::Integer);
-            args.push(cx.type_ix(rem_bytes * 8));
+            return cx.type_array(rest_ll_unit, rest_count);
         }
 
+        // Generate a struct type with the prefix and the "rest" arguments.
+        let prefix_args =
+            self.prefix.iter().flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx)));
+        let rest_args = (0..rest_count).map(|_| rest_ll_unit);
+        let args: Vec<_> = prefix_args.chain(rest_args).collect();
         cx.type_struct(&args, false)
     }
 }
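Review note: the rewritten `CastTarget::llvm_type` above drops the `(count, remainder)` split and rounds the unit count up instead, so a trailing partial unit now occupies a whole unit (for integer units only, per the assert). A minimal standalone sketch of the counting rule, with `total_bytes`/`unit_bytes` as illustrative stand-ins for `self.rest.total.bytes()` and `self.rest.unit.size.bytes()` — not compiler code:

```rust
/// Sketch of the rounded-up unit count introduced by this hunk.
fn rest_unit_count(total_bytes: u64, unit_bytes: u64) -> u64 {
    if total_bytes == 0 {
        return 0;
    }
    assert_ne!(unit_bytes, 0, "cannot divide {total_bytes} bytes into zero-size units");
    // Round up: a partial trailing unit becomes one whole unit instead of a
    // smaller trailing integer field.
    total_bytes.div_ceil(unit_bytes)
}

fn main() {
    // 12 bytes in 8-byte integer units: previously { i64, i32 }, now [2 x i64].
    assert_eq!(rest_unit_count(12, 8), 2);
    // Exact multiples are unchanged.
    assert_eq!(rest_unit_count(16, 8), 2);
    assert_eq!(rest_unit_count(0, 8), 0);
}
```

The payoff is that the no-prefix case always simplifies to a single unit or an array, never a mixed struct.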
@@ -208,54 +207,40 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             // Sized indirect arguments
             PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => {
                 let align = attrs.pointee_align.unwrap_or(self.layout.align.abi);
-                OperandValue::Ref(val, None, align).store(bx, dst);
+                OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst);
             }
             // Unsized indirect arguments
             PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
                 bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
             }
             PassMode::Cast { cast, pad_i32: _ } => {
-                // FIXME(eddyb): Figure out when the simpler Store is safe, clang
-                // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
-                let can_store_through_cast_ptr = false;
-                if can_store_through_cast_ptr {
-                    bx.store(val, dst.llval, self.layout.align.abi);
-                } else {
-                    // The actual return type is a struct, but the ABI
-                    // adaptation code has cast it into some scalar type. The
-                    // code that follows is the only reliable way I have
-                    // found to do a transform like i64 -> {i32,i32}.
-                    // Basically we dump the data onto the stack then memcpy it.
-                    //
-                    // Other approaches I tried:
-                    // - Casting rust ret pointer to the foreign type and using Store
-                    //   is (a) unsafe if size of foreign type > size of rust type and
-                    //   (b) runs afoul of strict aliasing rules, yielding invalid
-                    //   assembly under -O (specifically, the store gets removed).
-                    // - Truncating foreign type to correct integral type and then
-                    //   bitcasting to the struct type yields invalid cast errors.
-
-                    // We instead thus allocate some scratch space...
-                    let scratch_size = cast.size(bx);
-                    let scratch_align = cast.align(bx);
-                    let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
-                    bx.lifetime_start(llscratch, scratch_size);
-
-                    // ... where we first store the value...
-                    bx.store(val, llscratch, scratch_align);
-
-                    // ... and then memcpy it to the intended destination.
-                    bx.memcpy(
-                        dst.llval,
-                        self.layout.align.abi,
-                        llscratch,
-                        scratch_align,
-                        bx.const_usize(self.layout.size.bytes()),
-                        MemFlags::empty(),
-                    );
-
-                    bx.lifetime_end(llscratch, scratch_size);
-                }
+                // The ABI mandates that the value is passed as a different struct representation.
+                // Spill and reload it from the stack to convert from the ABI representation to
+                // the Rust representation.
+                let scratch_size = cast.size(bx);
+                let scratch_align = cast.align(bx);
+                // Note that the ABI type may be either larger or smaller than the Rust type,
+                // due to the presence or absence of trailing padding. For example:
+                // - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding
+                //   when passed by value, making it smaller.
+                // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes
+                //   when passed by value, making it larger.
+                let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes());
+                // Allocate some scratch space...
+                let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
+                bx.lifetime_start(llscratch, scratch_size);
+                // ...store the value...
+                bx.store(val, llscratch, scratch_align);
+                // ... and then memcpy it to the intended destination.
+                bx.memcpy(
+                    dst.val.llval,
+                    self.layout.align.abi,
+                    llscratch,
+                    scratch_align,
+                    bx.const_usize(copy_bytes),
+                    MemFlags::empty(),
+                );
+                bx.lifetime_end(llscratch, scratch_size);
             }
             _ => {
                 OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst);
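Review note: the hunk above makes the spill-and-reload path unconditional and, crucially, clamps the memcpy length, since the ABI (scratch) type and the Rust layout can differ in size in either direction. A tiny sketch of just the clamp (standalone, illustrative names):

```rust
use std::cmp;

/// Never copy more bytes than the smaller of the ABI scratch type and the
/// Rust layout — mirrors `copy_bytes` in the hunk above.
fn copy_bytes(scratch_size: u64, rust_size: u64) -> u64 {
    cmp::min(scratch_size, rust_size)
}

fn main() {
    // ABI type padded larger than the Rust type (e.g. { u16, u16, u16 }
    // padded to 8 bytes): copy only the 6 bytes the Rust layout owns.
    assert_eq!(copy_bytes(8, 6), 6);
    // ABI type smaller (trailing padding omitted when passed by value):
    // copy only what the ABI representation actually carries.
    assert_eq!(copy_bytes(12, 16), 12);
}
```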
@@ -280,7 +265,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             }
             PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => {
-                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
+                let place_val = PlaceValue {
+                    llval: next(),
+                    llextra: Some(next()),
+                    align: self.layout.align.abi,
+                };
+                OperandValue::Ref(place_val).store(bx, dst);
             }
             PassMode::Direct(_)
             | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ }
diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs
index 500904ce188..e09869cf425 100644
--- a/compiler/rustc_codegen_llvm/src/asm.rs
+++ b/compiler/rustc_codegen_llvm/src/asm.rs
@@ -220,7 +220,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         constraints.append(&mut clobbers);
         if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
             match asm_arch {
-                InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
+                InlineAsmArch::AArch64 | InlineAsmArch::Arm64EC | InlineAsmArch::Arm => {
                     constraints.push("~{cc}".to_string());
                 }
                 InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs
index 06a681c24e6..e61af863dc0 100644
--- a/compiler/rustc_codegen_llvm/src/back/lto.rs
+++ b/compiler/rustc_codegen_llvm/src/back/lto.rs
@@ -608,7 +608,7 @@ pub(crate) fn run_pass_manager(
         "LTOPostLink".as_ptr().cast(),
         11,
     ) {
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             module.module_llvm.llmod(),
             llvm::LLVMModFlagBehavior::Error,
             c"LTOPostLink".as_ptr().cast(),
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index 4efea66a7f1..49f9d7ddab6 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -880,6 +880,8 @@ pub(crate) unsafe fn codegen(
             config.emit_obj != EmitObj::None,
             dwarf_object_emitted,
             config.emit_bc,
+            config.emit_asm,
+            config.emit_ir,
             &cgcx.output_filenames,
         ))
     }
@@ -912,6 +914,7 @@ fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
         || cgcx.opts.target_triple.triple().contains("-darwin")
         || cgcx.opts.target_triple.triple().contains("-tvos")
         || cgcx.opts.target_triple.triple().contains("-watchos")
+        || cgcx.opts.target_triple.triple().contains("-visionos")
 }
 
 fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
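Review note: several hunks in this series replace the loose `(llval, llextra, align)` triple with the `PlaceValue` struct from `rustc_codegen_ssa`. A minimal sketch of the shape being migrated to — modeled only on the fields and the `new_sized` constructor used in this diff, not the upstream definition (the real `align` is an `Align`, and values are backend handles, not integers):

```rust
/// Hypothetical stand-in for `rustc_codegen_ssa::mir::place::PlaceValue`.
#[derive(Copy, Clone)]
struct PlaceValue<V> {
    /// Pointer to the contents of the place.
    llval: V,
    /// Extra data (vtable pointer or slice length) for unsized places.
    llextra: Option<V>,
    /// Known alignment of the place, in bytes here for simplicity.
    align: u64,
}

impl<V> PlaceValue<V> {
    /// Sized places never carry `llextra`; encoding that in one constructor
    /// is what lets call sites like `OperandValue::Ref(...)` stay terse.
    fn new_sized(llval: V, align: u64) -> Self {
        PlaceValue { llval, llextra: None, align }
    }
}

fn main() {
    let p = PlaceValue::new_sized(0xdead_beefu64, 8);
    assert!(p.llextra.is_none());
}
```

Grouping the three fields means a `PlaceRef` can hand its value part around as one unit (`place.val`), which is exactly what the `builder.rs` changes below rely on.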
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 1a32958d362..160f361b9b5 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -20,12 +20,9 @@ use rustc_middle::ty::layout::{
     FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
 };
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_sanitizers::{cfi, kcfi};
 use rustc_session::config::OptLevel;
 use rustc_span::Span;
-use rustc_symbol_mangling::typeid::{
-    kcfi_typeid_for_fnabi, kcfi_typeid_for_instance, typeid_for_fnabi, typeid_for_instance,
-    TypeIdOptions,
-};
 use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange};
 use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target};
 use smallvec::SmallVec;
@@ -538,7 +535,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 panic!("unsized locals must not be `extern` types");
             }
         }
-        assert_eq!(place.llextra.is_some(), place.layout.is_unsized());
+        assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized());
 
         if place.layout.is_zst() {
             return OperandRef::zero_sized(place.layout);
@@ -582,13 +579,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
         }
 
-        let val = if let Some(llextra) = place.llextra {
-            OperandValue::Ref(place.llval, Some(llextra), place.align)
+        let val = if let Some(_) = place.val.llextra {
+            // FIXME: Merge with the `else` below?
+            OperandValue::Ref(place.val)
         } else if place.layout.is_llvm_immediate() {
             let mut const_llval = None;
             let llty = place.layout.llvm_type(self);
             unsafe {
-                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) {
+                if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) {
                     if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                         if let Some(init) = llvm::LLVMGetInitializer(global) {
                             if self.val_ty(init) == llty {
@@ -599,7 +597,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(llty, place.llval, place.align);
+                let load = self.load(llty, place.val.llval, place.val.align);
                 if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
@@ -611,9 +609,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
                 let llptr = if i == 0 {
-                    place.llval
+                    place.val.llval
                 } else {
-                    self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes()))
+                    self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes()))
                 };
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -622,11 +620,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             };
 
             OperandValue::Pair(
-                load(0, a, place.layout, place.align, Size::ZERO),
-                load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset),
+                load(0, a, place.layout, place.val.align, Size::ZERO),
+                load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset),
             )
         } else {
-            OperandValue::Ref(place.llval, None, place.align)
+            OperandValue::Ref(place.val)
        };
 
         OperandRef { val, layout: place.layout }
 }
@@ -1151,12 +1149,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         order: rustc_codegen_ssa::common::AtomicOrdering,
     ) -> &'ll Value {
         // The only RMW operation that LLVM supports on pointers is compare-exchange.
-        if self.val_ty(src) == self.type_ptr()
-            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg
-        {
+        let requires_cast_to_int = self.val_ty(src) == self.type_ptr()
+            && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg;
+        if requires_cast_to_int {
             src = self.ptrtoint(src, self.type_isize());
         }
-        unsafe {
+        let mut res = unsafe {
             llvm::LLVMBuildAtomicRMW(
                 self.llbuilder,
                 AtomicRmwBinOp::from_generic(op),
@@ -1165,7 +1163,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 AtomicOrdering::from_generic(order),
                 llvm::False, // SingleThreaded
             )
+        };
+        if requires_cast_to_int {
+            res = self.inttoptr(res, self.type_ptr());
         }
+        res
     }
 
     fn atomic_fence(
@@ -1632,18 +1634,18 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             return;
         }
 
-        let mut options = TypeIdOptions::empty();
+        let mut options = cfi::TypeIdOptions::empty();
         if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
-            options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            options.insert(cfi::TypeIdOptions::GENERALIZE_POINTERS);
         }
         if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
-            options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            options.insert(cfi::TypeIdOptions::NORMALIZE_INTEGERS);
         }
 
         let typeid = if let Some(instance) = instance {
-            typeid_for_instance(self.tcx, instance, options)
+            cfi::typeid_for_instance(self.tcx, instance, options)
         } else {
-            typeid_for_fnabi(self.tcx, fn_abi, options)
+            cfi::typeid_for_fnabi(self.tcx, fn_abi, options)
         };
         let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap();
@@ -1680,18 +1682,18 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             return None;
         }
 
-        let mut options = TypeIdOptions::empty();
+        let mut options = kcfi::TypeIdOptions::empty();
         if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
-            options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+            options.insert(kcfi::TypeIdOptions::GENERALIZE_POINTERS);
         }
         if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
-            options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+            options.insert(kcfi::TypeIdOptions::NORMALIZE_INTEGERS);
         }
 
         let kcfi_typeid = if let Some(instance) = instance {
-            kcfi_typeid_for_instance(self.tcx, instance, options)
+            kcfi::typeid_for_instance(self.tcx, instance, options)
         } else {
-            kcfi_typeid_for_fnabi(self.tcx, fn_abi, options)
+            kcfi::typeid_for_fnabi(self.tcx, fn_abi, options)
         };
 
         Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 25cbd90460f..568fcc3f3cf 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -160,6 +160,10 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         self.const_int(self.type_i32(), i as i64)
     }
 
+    fn const_i8(&self, i: i8) -> &'ll Value {
+        self.const_int(self.type_i8(), i as i64)
+    }
+
     fn const_u32(&self, i: u32) -> &'ll Value {
         self.const_uint(self.type_i32(), i as u64)
     }
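Review note: the `atomic_rmw` hunk above doesn't just cast pointer operands to `isize` before the RMW (as before); it now remembers that the cast happened and converts the result back with `inttoptr`, so callers observe a pointer-typed value again. A control-flow sketch of that round trip, with plain integers and hypothetical helpers standing in for LLVM values and `ptrtoint`/`inttoptr`:

```rust
/// Sketch of the fix: if the operand had to be lowered to an integer for
/// LLVM's atomicrmw, lift the result back to a pointer afterwards.
fn atomic_rmw_sketch(is_ptr: bool, is_xchg: bool, mut src: u64) -> u64 {
    // Xchg is the one RMW op LLVM supports directly on pointers.
    let requires_cast_to_int = is_ptr && !is_xchg;
    if requires_cast_to_int {
        src = ptr_to_int(src); // `ptrtoint` in the real code
    }
    let mut res = raw_atomic_rmw(src);
    if requires_cast_to_int {
        res = int_to_ptr(res); // `inttoptr`: restore pointer-ness of the result
    }
    res
}

fn ptr_to_int(v: u64) -> u64 { v }
fn int_to_ptr(v: u64) -> u64 { v }
fn raw_atomic_rmw(v: u64) -> u64 { v.wrapping_add(1) }

fn main() {
    assert_eq!(atomic_rmw_sketch(true, false, 7), 8);
}
```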
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index df9f066e58a..d32baa6dc02 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -180,13 +180,13 @@ pub unsafe fn create_module<'ll>(
     // to ensure intrinsic calls don't use it.
     if !sess.needs_plt() {
         let avoid_plt = c"RtLibUseGOT".as_ptr().cast();
-        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
+        llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1);
     }
 
     // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.)
     if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() {
         let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr().cast();
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
             canonical_jump_tables,
@@ -197,7 +197,7 @@ pub unsafe fn create_module<'ll>(
     // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.)
     if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
         let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr().cast();
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
             enable_split_lto_unit,
@@ -208,7 +208,7 @@ pub unsafe fn create_module<'ll>(
     // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.)
     if sess.is_sanitizer_kcfi_enabled() {
         let kcfi = c"kcfi".as_ptr().cast();
-        llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
+        llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1);
     }
 
     // Control Flow Guard is currently only supported by the MSVC linker on Windows.
@@ -217,7 +217,7 @@ pub unsafe fn create_module<'ll>(
         CFGuard::Disabled => {}
         CFGuard::NoChecks => {
             // Set `cfguard=1` module flag to emit metadata only.
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 llmod,
                 llvm::LLVMModFlagBehavior::Warning,
                 c"cfguard".as_ptr() as *const _,
@@ -226,7 +226,7 @@ pub unsafe fn create_module<'ll>(
         }
         CFGuard::Checks => {
             // Set `cfguard=2` module flag to emit metadata and checks.
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 llmod,
                 llvm::LLVMModFlagBehavior::Warning,
                 c"cfguard".as_ptr() as *const _,
@@ -238,26 +238,26 @@ pub unsafe fn create_module<'ll>(
 
     if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection {
         if sess.target.arch == "aarch64" {
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 llmod,
                 llvm::LLVMModFlagBehavior::Min,
                 c"branch-target-enforcement".as_ptr().cast(),
                 bti.into(),
             );
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 llmod,
                 llvm::LLVMModFlagBehavior::Min,
                 c"sign-return-address".as_ptr().cast(),
                 pac_ret.is_some().into(),
             );
             let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A });
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 llmod,
                 llvm::LLVMModFlagBehavior::Min,
                 c"sign-return-address-all".as_ptr().cast(),
                 pac_opts.leaf.into(),
             );
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 llmod,
                 llvm::LLVMModFlagBehavior::Min,
                 c"sign-return-address-with-bkey".as_ptr().cast(),
@@ -273,7 +273,7 @@ pub unsafe fn create_module<'ll>(
 
     // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang).
     if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
             c"cf-protection-branch".as_ptr().cast(),
@@ -281,7 +281,7 @@ pub unsafe fn create_module<'ll>(
         )
     }
     if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection {
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             llmod,
             llvm::LLVMModFlagBehavior::Override,
             c"cf-protection-return".as_ptr().cast(),
@@ -290,7 +290,7 @@ pub unsafe fn create_module<'ll>(
     }
 
     if sess.opts.unstable_opts.virtual_function_elimination {
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             llmod,
             llvm::LLVMModFlagBehavior::Error,
             c"Virtual Function Elim".as_ptr().cast(),
@@ -300,7 +300,7 @@ pub unsafe fn create_module<'ll>(
 
     // Set module flag to enable Windows EHCont Guard (/guard:ehcont).
     if sess.opts.unstable_opts.ehcont_guard {
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             llmod,
             llvm::LLVMModFlagBehavior::Warning,
             c"ehcontguard".as_ptr() as *const _,
@@ -326,6 +326,22 @@ pub unsafe fn create_module<'ll>(
         llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1),
     );
 
+    // Emit RISC-V specific target-abi metadata
+    // to workaround lld as the LTO plugin not
+    // correctly setting target-abi for the LTO object
+    // FIXME: https://github.com/llvm/llvm-project/issues/50591
+    // If llvm_abiname is empty, emit nothing.
+    let llvm_abiname = &sess.target.options.llvm_abiname;
+    if matches!(sess.target.arch.as_ref(), "riscv32" | "riscv64") && !llvm_abiname.is_empty() {
+        llvm::LLVMRustAddModuleFlagString(
+            llmod,
+            llvm::LLVMModFlagBehavior::Error,
+            c"target-abi".as_ptr(),
+            llvm_abiname.as_ptr().cast(),
+            llvm_abiname.len(),
+        );
+    }
+
     // Add module flags specified via -Z llvm_module_flag
     for (key, value, behavior) in &sess.opts.unstable_opts.llvm_module_flag {
         let key = format!("{key}\0");
@@ -341,7 +357,7 @@ pub unsafe fn create_module<'ll>(
             // We already checked this during option parsing
             _ => unreachable!(),
         };
-        llvm::LLVMRustAddModuleFlag(llmod, behavior, key.as_ptr().cast(), *value)
+        llvm::LLVMRustAddModuleFlagU32(llmod, behavior, key.as_ptr().cast(), *value)
    }
 
     llmod
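Review note: the renamed `LLVMRustAddModuleFlagU32` now has a string-valued sibling, `LLVMRustAddModuleFlagString` (declared in `llvm/ffi.rs` below), and its first use is the RISC-V `target-abi` flag above. A hedged sketch of just the emission guard, with the resulting IR shown as a comment (approximate rendering; `Error` merge behavior is `i32 1` in LLVM's module-flag encoding):

```rust
/// Sketch of the condition guarding the new string module flag: only
/// RISC-V targets with an explicit `llvm_abiname` (e.g. "lp64d") emit it.
fn needs_target_abi_flag(arch: &str, llvm_abiname: &str) -> bool {
    matches!(arch, "riscv32" | "riscv64") && !llvm_abiname.is_empty()
}

fn main() {
    // When emitted, the module carries roughly:
    //   !llvm.module.flags = !{..., !{i32 1, !"target-abi", !"lp64d"}}
    // so lld's LTO step links the object with the intended ABI.
    assert!(needs_target_abi_flag("riscv64", "lp64d"));
    assert!(!needs_target_abi_flag("riscv64", ""));
    assert!(!needs_target_abi_flag("x86_64", "lp64d"));
}
```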
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 278db21b0a1..3f3969bbca3 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -17,11 +17,11 @@ use rustc_span::Symbol;
 
 /// Generates and exports the Coverage Map.
 ///
-/// Rust Coverage Map generation supports LLVM Coverage Mapping Format version
-/// 6 (zero-based encoded as 5), as defined at
-/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format).
+/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions
+/// 6 and 7 (encoded as 5 and 6 respectively), as described at
+/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/18.0-2024-02-13/llvm/docs/CoverageMappingFormat.rst).
 /// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`)
-/// bundled with Rust's fork of LLVM.
+/// distributed in the `llvm-tools-preview` rustup component.
 ///
 /// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
 /// the same version. Clang's implementation of Coverage Map generation was referenced when
@@ -31,10 +31,21 @@ use rustc_span::Symbol;
 pub fn finalize(cx: &CodegenCx<'_, '_>) {
     let tcx = cx.tcx;
 
-    // Ensure the installed version of LLVM supports Coverage Map Version 6
-    // (encoded as a zero-based value: 5), which was introduced with LLVM 13.
-    let version = coverageinfo::mapping_version();
-    assert_eq!(version, 5, "The `CoverageMappingVersion` exposed by `llvm-wrapper` is out of sync");
+    // Ensure that LLVM is using a version of the coverage mapping format that
+    // agrees with our Rust-side code. Expected versions (encoded as n-1) are:
+    // - `CovMapVersion::Version6` (5) used by LLVM 13-17
+    // - `CovMapVersion::Version7` (6) used by LLVM 18
+    let covmap_version = {
+        let llvm_covmap_version = coverageinfo::mapping_version();
+        let expected_versions = 5..=6;
+        assert!(
+            expected_versions.contains(&llvm_covmap_version),
+            "Coverage mapping version exposed by `llvm-wrapper` is out of sync; \
+            expected {expected_versions:?} but was {llvm_covmap_version}"
+        );
+        // This is the version number that we will embed in the covmap section:
+        llvm_covmap_version
+    };
 
     debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name());
 
@@ -74,7 +85,7 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) {
 
     // Generate the coverage map header, which contains the filenames used by
     // this CGU's coverage mappings, and store it in a well-known global.
-    let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val);
+    let cov_data_val = generate_coverage_map(cx, covmap_version, filenames_size, filenames_val);
     coverageinfo::save_cov_data_to_mod(cx, cov_data_val);
 
     let mut unused_function_names = Vec::new();
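Review note: the `finalize` hunk above relaxes the version pin from one exact value to a supported window, and threads the LLVM-reported value through to the covmap header instead of hard-coding it. A standalone sketch of that check (values taken from the hunk; everything else illustrative):

```rust
/// Sketch of the relaxed covmap version check: accept a range of encoded
/// versions instead of pinning exactly one.
fn check_covmap_version(llvm_covmap_version: u32) -> u32 {
    // Encoded as n-1: Version6 (5) for LLVM 13-17, Version7 (6) for LLVM 18.
    let expected_versions = 5..=6;
    assert!(
        expected_versions.contains(&llvm_covmap_version),
        "coverage mapping version out of sync: {llvm_covmap_version}"
    );
    // The value flows through to the emitted covmap section header.
    llvm_covmap_version
}

fn main() {
    assert_eq!(check_covmap_version(5), 5);
    assert_eq!(check_covmap_version(6), 6);
}
```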
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index 68c1770c1d4..140566e8da9 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -24,8 +24,6 @@ pub(crate) mod ffi;
 pub(crate) mod map_data;
 pub mod mapgen;
 
-const VAR_ALIGN: Align = Align::EIGHT;
-
 /// A context object for maintaining all state needed by the coverageinfo module.
 pub struct CrateCoverageContext<'ll, 'tcx> {
     /// Coverage data for each instrumented function identified by DefId.
@@ -226,7 +224,8 @@ pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>(
     llvm::set_global_constant(llglobal, true);
     llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
     llvm::set_section(llglobal, &covmap_section_name);
-    llvm::set_alignment(llglobal, VAR_ALIGN);
+    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
+    llvm::set_alignment(llglobal, Align::EIGHT);
     cx.add_used_global(llglobal);
 }
 
@@ -256,7 +255,8 @@ pub(crate) fn save_func_record_to_mod<'ll, 'tcx>(
     llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage);
     llvm::set_visibility(llglobal, llvm::Visibility::Hidden);
     llvm::set_section(llglobal, covfun_section_name);
-    llvm::set_alignment(llglobal, VAR_ALIGN);
+    // LLVM's coverage mapping format specifies 8-byte alignment for items in this section.
+    llvm::set_alignment(llglobal, Align::EIGHT);
     llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name);
     cx.add_used_global(llglobal);
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
index d3a851b40c0..4fdaa59e0e5 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs
@@ -110,7 +110,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
                 .unstable_opts
                 .dwarf_version
                 .unwrap_or(sess.target.default_dwarf_version);
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 self.llmod,
                 llvm::LLVMModFlagBehavior::Warning,
                 c"Dwarf Version".as_ptr().cast(),
@@ -118,7 +118,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
             );
         } else {
             // Indicate that we want CodeView debug information on MSVC
-            llvm::LLVMRustAddModuleFlag(
+            llvm::LLVMRustAddModuleFlagU32(
                 self.llmod,
                 llvm::LLVMModFlagBehavior::Warning,
                 c"CodeView".as_ptr().cast(),
@@ -127,7 +127,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> {
         }
 
         // Prevent bitcode readers from deleting the debug info.
-        llvm::LLVMRustAddModuleFlag(
+        llvm::LLVMRustAddModuleFlagU32(
             self.llmod,
             llvm::LLVMModFlagBehavior::Warning,
             c"Debug Info Version".as_ptr().cast(),
diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs
index 3ef8538ced3..7117c4a0ed9 100644
--- a/compiler/rustc_codegen_llvm/src/declare.rs
+++ b/compiler/rustc_codegen_llvm/src/declare.rs
@@ -22,10 +22,7 @@ use itertools::Itertools;
 use rustc_codegen_ssa::traits::TypeMembershipMethods;
 use rustc_data_structures::fx::FxIndexSet;
 use rustc_middle::ty::{Instance, Ty};
-use rustc_symbol_mangling::typeid::{
-    kcfi_typeid_for_fnabi, kcfi_typeid_for_instance, typeid_for_fnabi, typeid_for_instance,
-    TypeIdOptions,
-};
+use rustc_sanitizers::{cfi, kcfi};
 use smallvec::SmallVec;
 
 /// Declare a function.
@@ -145,27 +142,29 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
         if let Some(instance) = instance {
             let mut typeids = FxIndexSet::default();
             for options in [
-                TypeIdOptions::GENERALIZE_POINTERS,
-                TypeIdOptions::NORMALIZE_INTEGERS,
-                TypeIdOptions::NO_SELF_TYPE_ERASURE,
+                cfi::TypeIdOptions::GENERALIZE_POINTERS,
+                cfi::TypeIdOptions::NORMALIZE_INTEGERS,
+                cfi::TypeIdOptions::USE_CONCRETE_SELF,
             ]
             .into_iter()
             .powerset()
-            .map(TypeIdOptions::from_iter)
+            .map(cfi::TypeIdOptions::from_iter)
             {
-                let typeid = typeid_for_instance(self.tcx, instance, options);
+                let typeid = cfi::typeid_for_instance(self.tcx, instance, options);
                 if typeids.insert(typeid.clone()) {
                     self.add_type_metadata(llfn, typeid);
                 }
             }
         } else {
-            for options in
-                [TypeIdOptions::GENERALIZE_POINTERS, TypeIdOptions::NORMALIZE_INTEGERS]
-                    .into_iter()
-                    .powerset()
-                    .map(TypeIdOptions::from_iter)
+            for options in [
+                cfi::TypeIdOptions::GENERALIZE_POINTERS,
+                cfi::TypeIdOptions::NORMALIZE_INTEGERS,
+            ]
+            .into_iter()
+            .powerset()
+            .map(cfi::TypeIdOptions::from_iter)
             {
-                let typeid = typeid_for_fnabi(self.tcx, fn_abi, options);
+                let typeid = cfi::typeid_for_fnabi(self.tcx, fn_abi, options);
                 self.add_type_metadata(llfn, typeid);
             }
         }
@@ -173,19 +172,19 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
 
         if self.tcx.sess.is_sanitizer_kcfi_enabled() {
             // LLVM KCFI does not support multiple !kcfi_type attachments
-            let mut options = TypeIdOptions::empty();
+            let mut options = kcfi::TypeIdOptions::empty();
             if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() {
-                options.insert(TypeIdOptions::GENERALIZE_POINTERS);
+                options.insert(kcfi::TypeIdOptions::GENERALIZE_POINTERS);
             }
             if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() {
-                options.insert(TypeIdOptions::NORMALIZE_INTEGERS);
+                options.insert(kcfi::TypeIdOptions::NORMALIZE_INTEGERS);
             }
 
             if let Some(instance) = instance {
-                let kcfi_typeid = kcfi_typeid_for_instance(self.tcx, instance, options);
+                let kcfi_typeid = kcfi::typeid_for_instance(self.tcx, instance, options);
                 self.set_kcfi_type_metadata(llfn, kcfi_typeid);
             } else {
-                let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options);
+                let kcfi_typeid = kcfi::typeid_for_fnabi(self.tcx, fn_abi, options);
                 self.set_kcfi_type_metadata(llfn, kcfi_typeid);
             }
         }
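Review note: the `declare.rs` hunk keeps the existing pattern of attaching one `!type` metadata id per *combination* of encoding options, now namespaced under `cfi::`/`kcfi::`. A standalone illustration of the powerset pattern it relies on (same `itertools` crate declare.rs already imports; strings stand in for the bitflag options):

```rust
use itertools::Itertools;

fn main() {
    let options = ["GENERALIZE_POINTERS", "NORMALIZE_INTEGERS"];
    // powerset() yields {}, {GP}, {NI}, {GP, NI}: four option mixes,
    // hence four type-metadata attachments per declared function.
    let combos: Vec<Vec<&str>> = options.into_iter().powerset().collect();
    assert_eq!(combos.len(), 4);
}
```

Emitting a typeid for every subset is what lets an indirect call site match the callee no matter which combination of `-Zsanitizer-cfi-*` options the calling crate was built with.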
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index ab135e3ed64..2bed7c1bd1c 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -264,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     llvm::LLVMSetAlignment(load, align);
                 }
                 if !result.layout.is_zst() {
-                    self.store(load, result.llval, result.align);
+                    self.store_to_place(load, result.val);
                 }
                 return Ok(());
             }
@@ -428,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
             sym::black_box => {
                 args[0].val.store(self, result);
-                let result_val_span = [result.llval];
+                let result_val_span = [result.val.llval];
                 // We need to "use" the argument in some way LLVM can't introspect, and on
                 // targets that support it we can typically leverage inline assembly to do
                 // this. LLVM's interpretation of inline assembly is that it's, well, a black
@@ -482,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
 
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.llval, result.align);
+                self.store(llval, result.val.llval, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -1065,7 +1065,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let place = PlaceRef::alloca(bx, args[0].layout);
                 args[0].val.store(bx, place);
                 let int_ty = bx.type_ix(expected_bytes * 8);
-                bx.load(int_ty, place.llval, Align::ONE)
+                bx.load(int_ty, place.val.llval, Align::ONE)
             }
             _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                 span,
@@ -2111,7 +2111,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         return Ok(args[0].immediate());
     }
 
-    if name == sym::simd_expose_addr {
+    if name == sym::simd_expose_provenance {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
@@ -2139,7 +2139,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
     }
 
-    if name == sym::simd_from_exposed_addr {
+    if name == sym::simd_with_exposed_provenance {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 284bc74d5c4..5509baaa3e9 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1801,12 +1801,21 @@ extern "C" {
     ///
     /// In order for Rust-C LTO to work, module flags must be compatible with Clang. What
     /// "compatible" means depends on the merge behaviors involved.
-    pub fn LLVMRustAddModuleFlag(
+    pub fn LLVMRustAddModuleFlagU32(
         M: &Module,
         merge_behavior: LLVMModFlagBehavior,
         name: *const c_char,
         value: u32,
     );
+
+    pub fn LLVMRustAddModuleFlagString(
+        M: &Module,
+        merge_behavior: LLVMModFlagBehavior,
+        name: *const c_char,
+        value: *const c_char,
+        value_len: size_t,
+    );
+
     pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool;
 
     pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>;
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index af1bbda4d08..a00f09dc40d 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -281,9 +281,6 @@ impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
         ty.llvm_type(self)
     }
-    fn scalar_copy_backend_type(&self, layout: TyAndLayout<'tcx>) -> Option<Self::Type> {
-        layout.scalar_copy_llvm_type(self)
-    }
 }
 
 impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index d10a083765b..40ed6baa610 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -5,7 +5,6 @@ use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
-use rustc_target::abi::HasDataLayout;
 use rustc_target::abi::{Abi, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F128, F16, F32, F64};
 use rustc_target::abi::{Scalar, Size, Variants};
@@ -166,7 +165,6 @@ pub trait LayoutLlvmExt<'tcx> {
         index: usize,
         immediate: bool,
     ) -> &'a Type;
-    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
 }
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
@@ -308,44 +306,4 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
 
         self.scalar_llvm_type_at(cx, scalar)
     }
-
-    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
-        debug_assert!(self.is_sized());
-
-        // FIXME: this is a fairly arbitrary choice, but 128 bits on WASM
-        // (matching the 128-bit SIMD types proposal) and 256 bits on x64
-        // (like AVX2 registers) seems at least like a tolerable starting point.
-        let threshold = cx.data_layout().pointer_size * 4;
-        if self.layout.size() > threshold {
-            return None;
-        }
-
-        // Vectors, even for non-power-of-two sizes, have the same layout as
-        // arrays but don't count as aggregate types
-        // While LLVM theoretically supports non-power-of-two sizes, and they
-        // often work fine, sometimes x86-isel deals with them horribly
-        // (see #115212) so for now only use power-of-two ones.
-        if let FieldsShape::Array { count, .. } = self.layout.fields()
-            && count.is_power_of_two()
-            && let element = self.field(cx, 0)
-            && element.ty.is_integral()
-        {
-            // `cx.type_ix(bits)` is tempting here, but while that works great
-            // for things that *stay* as memory-to-memory copies, it also ends
-            // up suppressing vectorization as it introduces shifts when it
-            // extracts all the individual values.
-
-            let ety = element.llvm_type(cx);
-            if *count == 1 {
-                // Emitting `<1 x T>` would be silly; just use the scalar.
-                return Some(ety);
-            } else {
-                return Some(cx.type_vector(ety, *count));
-            }
-        }
-
-        // FIXME: The above only handled integer arrays; surely more things
-        // would also be possible. Be careful about provenance, though!
-        None
-    }
 }
