Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
20 files changed, 426 insertions, 377 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index e5f5146fac8..c0b43b77897 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -7,7 +7,7 @@ use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; -use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::MemFlags; use rustc_middle::bug; @@ -16,13 +16,15 @@ pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; use rustc_middle::ty::Ty; use rustc_session::config; pub use rustc_target::abi::call::*; -use rustc_target::abi::{self, HasDataLayout, Int}; +use rustc_target::abi::{self, HasDataLayout, Int, Size}; pub use rustc_target::spec::abi::Abi; use rustc_target::spec::SanitizerSet; use libc::c_uint; use smallvec::SmallVec; +use std::cmp; + pub trait ArgAttributesExt { fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value); fn apply_attrs_to_callsite( @@ -130,42 +132,39 @@ impl LlvmType for Reg { impl LlvmType for CastTarget { fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { let rest_ll_unit = self.rest.unit.llvm_type(cx); - let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 { - (0, 0) + let rest_count = if self.rest.total == Size::ZERO { + 0 } else { - ( - self.rest.total.bytes() / self.rest.unit.size.bytes(), - self.rest.total.bytes() % self.rest.unit.size.bytes(), - ) + assert_ne!( + self.rest.unit.size, + Size::ZERO, + "total size {:?} cannot be divided into units of zero size", + self.rest.total + ); + if self.rest.total.bytes() % self.rest.unit.size.bytes() != 0 { + assert_eq!(self.rest.unit.kind, RegKind::Integer, "only int regs can be split"); + } + self.rest.total.bytes().div_ceil(self.rest.unit.size.bytes()) }; + // Simplify to a single unit or an array if there's no prefix. + // This produces the same layout, but using a simpler type. if self.prefix.iter().all(|x| x.is_none()) { - // Simplify to a single unit when there is no prefix and size <= unit size - if self.rest.total <= self.rest.unit.size { + // We can't do this if is_consecutive is set and the unit would get + // split on the target. Currently, this is only relevant for i128 + // registers. + if rest_count == 1 && (!self.rest.is_consecutive || self.rest.unit != Reg::i128()) { return rest_ll_unit; } - // Simplify to array when all chunks are the same size and type - if rem_bytes == 0 { - return cx.type_array(rest_ll_unit, rest_count); - } - } - - // Create list of fields in the main structure - let mut args: Vec<_> = self - .prefix - .iter() - .flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx))) - .chain((0..rest_count).map(|_| rest_ll_unit)) - .collect(); - - // Append final integer - if rem_bytes != 0 { - // Only integers can be really split further. - assert_eq!(self.rest.unit.kind, RegKind::Integer); - args.push(cx.type_ix(rem_bytes * 8)); + return cx.type_array(rest_ll_unit, rest_count); } + // Generate a struct type with the prefix and the "rest" arguments. 
+ let prefix_args = + self.prefix.iter().flat_map(|option_reg| option_reg.map(|reg| reg.llvm_type(cx))); + let rest_args = (0..rest_count).map(|_| rest_ll_unit); + let args: Vec<_> = prefix_args.chain(rest_args).collect(); cx.type_struct(&args, false) } } @@ -208,54 +207,40 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { // Sized indirect arguments PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => { let align = attrs.pointee_align.unwrap_or(self.layout.align.abi); - OperandValue::Ref(val, None, align).store(bx, dst); + OperandValue::Ref(PlaceValue::new_sized(val, align)).store(bx, dst); } // Unsized indirect arguments PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => { bug!("unsized `ArgAbi` must be handled through `store_fn_arg`"); } PassMode::Cast { cast, pad_i32: _ } => { - // FIXME(eddyb): Figure out when the simpler Store is safe, clang - // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. - let can_store_through_cast_ptr = false; - if can_store_through_cast_ptr { - bx.store(val, dst.llval, self.layout.align.abi); - } else { - // The actual return type is a struct, but the ABI - // adaptation code has cast it into some scalar type. The - // code that follows is the only reliable way I have - // found to do a transform like i64 -> {i32,i32}. - // Basically we dump the data onto the stack then memcpy it. - // - // Other approaches I tried: - // - Casting rust ret pointer to the foreign type and using Store - // is (a) unsafe if size of foreign type > size of rust type and - // (b) runs afoul of strict aliasing rules, yielding invalid - // assembly under -O (specifically, the store gets removed). - // - Truncating foreign type to correct integral type and then - // bitcasting to the struct type yields invalid cast errors. - - // We instead thus allocate some scratch space... - let scratch_size = cast.size(bx); - let scratch_align = cast.align(bx); - let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); - bx.lifetime_start(llscratch, scratch_size); - - // ... where we first store the value... - bx.store(val, llscratch, scratch_align); - - // ... and then memcpy it to the intended destination. - bx.memcpy( - dst.llval, - self.layout.align.abi, - llscratch, - scratch_align, - bx.const_usize(self.layout.size.bytes()), - MemFlags::empty(), - ); - - bx.lifetime_end(llscratch, scratch_size); - } + // The ABI mandates that the value is passed as a different struct representation. + // Spill and reload it from the stack to convert from the ABI representation to + // the Rust representation. + let scratch_size = cast.size(bx); + let scratch_align = cast.align(bx); + // Note that the ABI type may be either larger or smaller than the Rust type, + // due to the presence or absence of trailing padding. For example: + // - On some ABIs, the Rust layout { f64, f32, <f32 padding> } may omit padding + // when passed by value, making it smaller. + // - On some ABIs, the Rust layout { u16, u16, u16 } may be padded up to 8 bytes + // when passed by value, making it larger. + let copy_bytes = cmp::min(scratch_size.bytes(), self.layout.size.bytes()); + // Allocate some scratch space... + let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); + bx.lifetime_start(llscratch, scratch_size); + // ...store the value... + bx.store(val, llscratch, scratch_align); + // ... and then memcpy it to the intended destination.
+ bx.memcpy( + dst.val.llval, + self.layout.align.abi, + llscratch, + scratch_align, + bx.const_usize(copy_bytes), + MemFlags::empty(), + ); + bx.lifetime_end(llscratch, scratch_size); } _ => { OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst); @@ -280,7 +265,12 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { OperandValue::Pair(next(), next()).store(bx, dst); } PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => { - OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst); + let place_val = PlaceValue { + llval: next(), + llextra: Some(next()), + align: self.layout.align.abi, + }; + OperandValue::Ref(place_val).store(bx, dst); } PassMode::Direct(_) | PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: _ } diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 74539d4d495..e09869cf425 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -220,7 +220,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { constraints.append(&mut clobbers); if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) { match asm_arch { - InlineAsmArch::AArch64 | InlineAsmArch::Arm => { + InlineAsmArch::AArch64 | InlineAsmArch::Arm64EC | InlineAsmArch::Arm => { constraints.push("~{cc}".to_string()); } InlineAsmArch::X86 | InlineAsmArch::X86_64 => { @@ -466,11 +466,11 @@ pub(crate) fn inline_asm_call<'ll>( let call = if !labels.is_empty() { assert!(catch_funclet.is_none()); - bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None) + bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None) } else if let Some((catch, funclet)) = catch_funclet { - bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet) + bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None) } else { - bx.call(fty, None, None, v, inputs, None) + bx.call(fty, None, None, v, inputs, None, None) }; // Store mark in a metadata node so we can map LLVM errors diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index f9eaa0d94cb..870e5ab3296 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -417,7 +417,7 @@ pub fn from_fn_attrs<'ll, 'tcx>( to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry")); } if let Some(align) = codegen_fn_attrs.alignment { - llvm::set_alignment(llfn, align as usize); + llvm::set_alignment(llfn, align); } to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize)); diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 06a681c24e6..e61af863dc0 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -608,7 +608,7 @@ pub(crate) fn run_pass_manager( "LTOPostLink".as_ptr().cast(), 11, ) { - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( module.module_llvm.llmod(), llvm::LLVMModFlagBehavior::Error, c"LTOPostLink".as_ptr().cast(), diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 031bbd63361..49f9d7ddab6 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -29,7 +29,8 @@ use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{DiagCtxt, FatalError, Level}; use rustc_fs_util::{link_or_copy, 
path_to_c_string}; use rustc_middle::ty::TyCtxt; -use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath}; +use rustc_session::config::{self, Lto, OutputType, Passes}; +use rustc_session::config::{RemapPathScopeComponents, SplitDwarfKind, SwitchWithOptPath}; use rustc_session::Session; use rustc_span::symbol::sym; use rustc_span::InnerSpan; @@ -257,18 +258,17 @@ pub fn target_machine_factory( }; let debuginfo_compression = SmallCStr::new(&debuginfo_compression); - let should_prefer_remapped_for_split_debuginfo_paths = - sess.should_prefer_remapped_for_split_debuginfo_paths(); + let file_name_display_preference = + sess.filename_display_preference(RemapPathScopeComponents::DEBUGINFO); Arc::new(move |config: TargetMachineFactoryConfig| { let path_to_cstring_helper = |path: Option<PathBuf>| -> CString { let path = path.unwrap_or_default(); - let path = if should_prefer_remapped_for_split_debuginfo_paths { - path_mapping.map_prefix(path).0 - } else { - path.into() - }; - CString::new(path.to_str().unwrap()).unwrap() + let path = path_mapping + .to_real_filename(path) + .to_string_lossy(file_name_display_preference) + .into_owned(); + CString::new(path).unwrap() }; let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file); @@ -880,6 +880,8 @@ pub(crate) unsafe fn codegen( config.emit_obj != EmitObj::None, dwarf_object_emitted, config.emit_bc, + config.emit_asm, + config.emit_ir, &cgcx.output_filenames, )) } @@ -912,6 +914,7 @@ fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool { || cgcx.opts.target_triple.triple().contains("-darwin") || cgcx.opts.target_triple.triple().contains("-tvos") || cgcx.opts.target_triple.triple().contains("-watchos") + || cgcx.opts.target_triple.triple().contains("-visionos") } fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool { diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 7f87d06d6ef..160f361b9b5 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -19,9 +19,10 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{ FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout, }; -use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; +use rustc_sanitizers::{cfi, kcfi}; +use rustc_session::config::OptLevel; use rustc_span::Span; -use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions}; use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange}; use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target}; use smallvec::SmallVec; @@ -221,6 +222,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { then: &'ll BasicBlock, catch: &'ll BasicBlock, funclet: Option<&Funclet<'ll>>, + instance: Option<Instance<'tcx>>, ) -> &'ll Value { debug!("invoke {:?} with args ({:?})", llfn, args); @@ -233,10 +235,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } // Emit CFI pointer type membership test - self.cfi_type_test(fn_attrs, fn_abi, llfn); + self.cfi_type_test(fn_attrs, fn_abi, instance, llfn); // Emit KCFI operand bundle - let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn); + let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw); if let Some(kcfi_bundle) = kcfi_bundle { 
bundles.push(kcfi_bundle); @@ -533,7 +535,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { panic!("unsized locals must not be `extern` types"); } } - assert_eq!(place.llextra.is_some(), place.layout.is_unsized()); + assert_eq!(place.val.llextra.is_some(), place.layout.is_unsized()); if place.layout.is_zst() { return OperandRef::zero_sized(place.layout); @@ -547,6 +549,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { layout: TyAndLayout<'tcx>, offset: Size, ) { + if bx.cx.sess().opts.optimize == OptLevel::No { + // Don't emit metadata we're not going to use + return; + } + if !scalar.is_uninit_valid() { bx.noundef_metadata(load); } @@ -572,13 +579,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - let val = if let Some(llextra) = place.llextra { - OperandValue::Ref(place.llval, Some(llextra), place.align) + let val = if let Some(_) = place.val.llextra { + // FIXME: Merge with the `else` below? + OperandValue::Ref(place.val) } else if place.layout.is_llvm_immediate() { let mut const_llval = None; let llty = place.layout.llvm_type(self); unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(place.llval) { + if let Some(global) = llvm::LLVMIsAGlobalVariable(place.val.llval) { if llvm::LLVMIsGlobalConstant(global) == llvm::True { if let Some(init) = llvm::LLVMGetInitializer(global) { if self.val_ty(init) == llty { @@ -589,7 +597,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } let llval = const_llval.unwrap_or_else(|| { - let load = self.load(llty, place.llval, place.align); + let load = self.load(llty, place.val.llval, place.val.align); if let abi::Abi::Scalar(scalar) = place.layout.abi { scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO); } @@ -601,9 +609,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { let mut load = |i, scalar: abi::Scalar, layout, align, offset| { let llptr = if i == 0 { - place.llval + place.val.llval } else { - self.inbounds_ptradd(place.llval, self.const_usize(b_offset.bytes())) + self.inbounds_ptradd(place.val.llval, self.const_usize(b_offset.bytes())) }; let llty = place.layout.scalar_pair_element_llvm_type(self, i, false); let load = self.load(llty, llptr, align); @@ -612,11 +620,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { }; OperandValue::Pair( - load(0, a, place.layout, place.align, Size::ZERO), - load(1, b, place.layout, place.align.restrict_for_offset(b_offset), b_offset), + load(0, a, place.layout, place.val.align, Size::ZERO), + load(1, b, place.layout, place.val.align.restrict_for_offset(b_offset), b_offset), ) } else { - OperandValue::Ref(place.llval, None, place.align) + OperandValue::Ref(place.val) }; OperandRef { val, layout: place.layout } @@ -663,6 +671,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { return; } + if self.cx.sess().opts.optimize == OptLevel::No { + // Don't emit metadata we're not going to use + return; + } + unsafe { let llty = self.cx.val_ty(load); let v = [ @@ -1235,6 +1248,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llfn: &'ll Value, args: &[&'ll Value], funclet: Option<&Funclet<'ll>>, + instance: Option<Instance<'tcx>>, ) -> &'ll Value { debug!("call {:?} with args ({:?})", llfn, args); @@ -1247,10 +1261,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } // Emit CFI pointer type membership test - 
self.cfi_type_test(fn_attrs, fn_abi, llfn); + self.cfi_type_test(fn_attrs, fn_abi, instance, llfn); // Emit KCFI operand bundle - let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn); + let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw); if let Some(kcfi_bundle) = kcfi_bundle { bundles.push(kcfi_bundle); @@ -1472,7 +1486,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value { let (ty, f) = self.cx.get_intrinsic(intrinsic); - self.call(ty, None, None, f, args, None) + self.call(ty, None, None, f, args, None, None) } fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { @@ -1530,7 +1544,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { format!("llvm.{instr}.sat.i{int_width}.f{float_width}") }; let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty)); - self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None) + self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None, None) } pub(crate) fn landing_pad( @@ -1558,6 +1572,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { default_dest: &'ll BasicBlock, indirect_dest: &[&'ll BasicBlock], funclet: Option<&Funclet<'ll>>, + instance: Option<Instance<'tcx>>, ) -> &'ll Value { debug!("invoke {:?} with args ({:?})", llfn, args); @@ -1570,10 +1585,10 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } // Emit CFI pointer type membership test - self.cfi_type_test(fn_attrs, fn_abi, llfn); + self.cfi_type_test(fn_attrs, fn_abi, instance, llfn); // Emit KCFI operand bundle - let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn); + let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw); if let Some(kcfi_bundle) = kcfi_bundle { bundles.push(kcfi_bundle); @@ -1605,6 +1620,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { &mut self, fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, + instance: Option<Instance<'tcx>>, llfn: &'ll Value, ) { let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) }; @@ -1618,15 +1634,19 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { return; } - let mut options = TypeIdOptions::empty(); + let mut options = cfi::TypeIdOptions::empty(); if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() { - options.insert(TypeIdOptions::GENERALIZE_POINTERS); + options.insert(cfi::TypeIdOptions::GENERALIZE_POINTERS); } if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() { - options.insert(TypeIdOptions::NORMALIZE_INTEGERS); + options.insert(cfi::TypeIdOptions::NORMALIZE_INTEGERS); } - let typeid = typeid_for_fnabi(self.tcx, fn_abi, options); + let typeid = if let Some(instance) = instance { + cfi::typeid_for_instance(self.tcx, instance, options) + } else { + cfi::typeid_for_fnabi(self.tcx, fn_abi, options) + }; let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap(); // Test whether the function pointer is associated with the type identifier. 
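Aside on the new `instance` parameter: `call`, `invoke`, and `callbr` now thread an `Option<Instance<'tcx>>` down to the CFI/KCFI helpers so that, when the concrete callee is known, the type id can be derived from the monomorphized instance rather than from the bare function ABI. Below is a minimal, self-contained sketch of that preference order; `Instance`, `FnAbi`, and the two `typeid_for_*` helpers are illustrative stand-ins, not the rustc-internal types:

    struct Instance { symbol: &'static str }
    struct FnAbi { signature: &'static str }

    // Stand-in for `cfi::typeid_for_instance`: an id tied to one monomorphized callee.
    fn typeid_for_instance(instance: &Instance) -> String {
        format!("instance:{}", instance.symbol)
    }

    // Stand-in for `cfi::typeid_for_fnabi`: an id tied only to the call signature.
    fn typeid_for_fnabi(fn_abi: &FnAbi) -> String {
        format!("abi:{}", fn_abi.signature)
    }

    fn cfi_typeid(instance: Option<&Instance>, fn_abi: &FnAbi) -> String {
        // Prefer the precise instance-based id when the callee is known,
        // falling back to the ABI-based id for opaque function pointers.
        match instance {
            Some(instance) => typeid_for_instance(instance),
            None => typeid_for_fnabi(fn_abi),
        }
    }

    fn main() {
        let abi = FnAbi { signature: "fn(i32) -> i32" };
        assert_eq!(cfi_typeid(Some(&Instance { symbol: "foo" }), &abi), "instance:foo");
        assert_eq!(cfi_typeid(None, &abi), "abi:fn(i32) -> i32");
    }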
@@ -1648,6 +1668,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { &mut self, fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, + instance: Option<Instance<'tcx>>, llfn: &'ll Value, ) -> Option<llvm::OperandBundleDef<'ll>> { let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) }; @@ -1661,15 +1682,20 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { return None; } - let mut options = TypeIdOptions::empty(); + let mut options = kcfi::TypeIdOptions::empty(); if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() { - options.insert(TypeIdOptions::GENERALIZE_POINTERS); + options.insert(kcfi::TypeIdOptions::GENERALIZE_POINTERS); } if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() { - options.insert(TypeIdOptions::NORMALIZE_INTEGERS); + options.insert(kcfi::TypeIdOptions::NORMALIZE_INTEGERS); } - let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options); + let kcfi_typeid = if let Some(instance) = instance { + kcfi::typeid_for_instance(self.tcx, instance, options) + } else { + kcfi::typeid_for_fnabi(self.tcx, fn_abi, options) + }; + Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)])) } else { None diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 25cbd90460f..568fcc3f3cf 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -160,6 +160,10 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_int(self.type_i32(), i as i64) } + fn const_i8(&self, i: i8) -> &'ll Value { + self.const_int(self.type_i8(), i as i64) + } + fn const_u32(&self, i: u32) -> &'ll Value { self.const_uint(self.type_i32(), i as u64) } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 649ff9df2cc..d32baa6dc02 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -27,9 +27,7 @@ use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet}; use rustc_session::Session; use rustc_span::source_map::Spanned; use rustc_span::Span; -use rustc_target::abi::{ - call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx, -}; +use rustc_target::abi::{call::FnAbi, HasDataLayout, TargetDataLayout, VariantIdx}; use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel}; use smallvec::SmallVec; @@ -83,7 +81,6 @@ pub struct CodegenCx<'ll, 'tcx> { /// Mapping of scalar types to llvm types. pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>, - pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>, pub isize_ty: &'ll Type, pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>, @@ -183,13 +180,13 @@ pub unsafe fn create_module<'ll>( // to ensure intrinsic calls don't use it. if !sess.needs_plt() { let avoid_plt = c"RtLibUseGOT".as_ptr().cast(); - llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1); + llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1); } // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.) 
if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() { let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr().cast(); - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Override, canonical_jump_tables, @@ -200,7 +197,7 @@ pub unsafe fn create_module<'ll>( // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.) if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() { let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr().cast(); - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Override, enable_split_lto_unit, @@ -211,7 +208,7 @@ pub unsafe fn create_module<'ll>( // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.) if sess.is_sanitizer_kcfi_enabled() { let kcfi = c"kcfi".as_ptr().cast(); - llvm::LLVMRustAddModuleFlag(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1); + llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1); } // Control Flow Guard is currently only supported by the MSVC linker on Windows. @@ -220,7 +217,7 @@ pub unsafe fn create_module<'ll>( CFGuard::Disabled => {} CFGuard::NoChecks => { // Set `cfguard=1` module flag to emit metadata only. - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Warning, c"cfguard".as_ptr() as *const _, @@ -229,7 +226,7 @@ pub unsafe fn create_module<'ll>( } CFGuard::Checks => { // Set `cfguard=2` module flag to emit metadata and checks. - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Warning, c"cfguard".as_ptr() as *const _, @@ -241,26 +238,26 @@ pub unsafe fn create_module<'ll>( if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection { if sess.target.arch == "aarch64" { - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, c"branch-target-enforcement".as_ptr().cast(), bti.into(), ); - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, c"sign-return-address".as_ptr().cast(), pac_ret.is_some().into(), ); let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A }); - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, c"sign-return-address-all".as_ptr().cast(), pac_opts.leaf.into(), ); - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, c"sign-return-address-with-bkey".as_ptr().cast(), @@ -276,7 +273,7 @@ pub unsafe fn create_module<'ll>( // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang). 
if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection { - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Override, c"cf-protection-branch".as_ptr().cast(), @@ -284,7 +281,7 @@ ) } if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection { - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Override, c"cf-protection-return".as_ptr().cast(), @@ -293,7 +290,7 @@ } if sess.opts.unstable_opts.virtual_function_elimination { - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Error, c"Virtual Function Elim".as_ptr().cast(), @@ -303,7 +300,7 @@ // Set module flag to enable Windows EHCont Guard (/guard:ehcont). if sess.opts.unstable_opts.ehcont_guard { - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Warning, c"ehcontguard".as_ptr() as *const _, @@ -329,6 +326,22 @@ llvm::LLVMMDNodeInContext(llcx, &name_metadata, 1), ); + // Emit RISC-V specific target-abi metadata + // to work around lld as the LTO plugin not + // correctly setting target-abi for the LTO object + // FIXME: https://github.com/llvm/llvm-project/issues/50591 + // If llvm_abiname is empty, emit nothing. + let llvm_abiname = &sess.target.options.llvm_abiname; + if matches!(sess.target.arch.as_ref(), "riscv32" | "riscv64") && !llvm_abiname.is_empty() { + llvm::LLVMRustAddModuleFlagString( + llmod, + llvm::LLVMModFlagBehavior::Error, + c"target-abi".as_ptr(), + llvm_abiname.as_ptr().cast(), + llvm_abiname.len(), + ); + } + // Add module flags specified via -Z llvm_module_flag for (key, value, behavior) in &sess.opts.unstable_opts.llvm_module_flag { let key = format!("{key}\0"); @@ -344,7 +357,7 @@ // We already checked this during option parsing _ => unreachable!(), }; - llvm::LLVMRustAddModuleFlag(llmod, behavior, key.as_ptr().cast(), *value) + llvm::LLVMRustAddModuleFlagU32(llmod, behavior, key.as_ptr().cast(), *value) } llmod @@ -450,7 +463,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { compiler_used_statics: RefCell::new(Vec::new()), type_lowering: Default::default(), scalar_lltypes: Default::default(), - pointee_infos: Default::default(), isize_ty, coverage_cx, dbg_cx, diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index ee7ea342301..3f3969bbca3 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -6,9 +6,8 @@ use crate::llvm; use itertools::Itertools as _; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods}; -use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; -use rustc_hir::def::DefKind; -use rustc_hir::def_id::DefId; +use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet}; +use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_index::IndexVec; use rustc_middle::bug; use rustc_middle::mir; @@ -18,11 +17,11 @@ use rustc_span::Symbol; /// Generates and exports the Coverage Map.
/// -/// Rust Coverage Map generation supports LLVM Coverage Mapping Format version -/// 6 (zero-based encoded as 5), as defined at -/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format). +/// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions +/// 6 and 7 (encoded as 5 and 6 respectively), as described at +/// [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/rustc/18.0-2024-02-13/llvm/docs/CoverageMappingFormat.rst). /// These versions are supported by the LLVM coverage tools (`llvm-profdata` and `llvm-cov`) -/// bundled with Rust's fork of LLVM. +/// distributed in the `llvm-tools-preview` rustup component. /// /// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with /// the same version. Clang's implementation of Coverage Map generation was referenced when @@ -32,10 +31,21 @@ use rustc_span::Symbol; pub fn finalize(cx: &CodegenCx<'_, '_>) { let tcx = cx.tcx; - // Ensure the installed version of LLVM supports Coverage Map Version 6 - // (encoded as a zero-based value: 5), which was introduced with LLVM 13. - let version = coverageinfo::mapping_version(); - assert_eq!(version, 5, "The `CoverageMappingVersion` exposed by `llvm-wrapper` is out of sync"); + // Ensure that LLVM is using a version of the coverage mapping format that + // agrees with our Rust-side code. Expected versions (encoded as n-1) are: + // - `CovMapVersion::Version6` (5) used by LLVM 13-17 + // - `CovMapVersion::Version7` (6) used by LLVM 18 + let covmap_version = { + let llvm_covmap_version = coverageinfo::mapping_version(); + let expected_versions = 5..=6; + assert!( + expected_versions.contains(&llvm_covmap_version), + "Coverage mapping version exposed by `llvm-wrapper` is out of sync; \ + expected {expected_versions:?} but was {llvm_covmap_version}" + ); + // This is the version number that we will embed in the covmap section: + llvm_covmap_version + }; debug!("Generating coverage map for CodegenUnit: `{}`", cx.codegen_unit.name()); @@ -75,7 +85,7 @@ pub fn finalize(cx: &CodegenCx<'_, '_>) { // Generate the coverage map header, which contains the filenames used by // this CGU's coverage mappings, and store it in a well-known global. - let cov_data_val = generate_coverage_map(cx, version, filenames_size, filenames_val); + let cov_data_val = generate_coverage_map(cx, covmap_version, filenames_size, filenames_val); coverageinfo::save_cov_data_to_mod(cx, cov_data_val); let mut unused_function_names = Vec::new(); @@ -174,8 +184,14 @@ impl GlobalFileTable { // Since rustc generates coverage maps with relative paths, the // compilation directory can be combined with the relative paths // to get absolute paths, if needed. 
+ use rustc_session::config::RemapPathScopeComponents; use rustc_session::RemapFileNameExt; - let working_dir: &str = &tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy(); + let working_dir: &str = &tcx + .sess + .opts + .working_dir + .for_scope(tcx.sess, RemapPathScopeComponents::MACRO) + .to_string_lossy(); llvm::build_byte_buffer(|buffer| { coverageinfo::write_filenames_section_to_buffer( @@ -335,16 +351,9 @@ fn save_function_record( ); } -/// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for -/// the functions that went through codegen; such as public functions and "used" functions -/// (functions referenced by other "used" or public items). Any other functions considered unused, -/// or "Unreachable", were still parsed and processed through the MIR stage, but were not -/// codegenned. (Note that `-Clink-dead-code` can force some unused code to be codegenned, but -/// that flag is known to cause other errors, when combined with `-C instrument-coverage`; and -/// `-Clink-dead-code` will not generate code for unused generic functions.) -/// -/// We can find the unused functions (including generic functions) by the set difference of all MIR -/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`codegenned_and_inlined_items`). +/// Each CGU will normally only emit coverage metadata for the functions that it actually generates. +/// But since we don't want unused functions to disappear from coverage reports, we also scan for +/// functions that were instrumented but are not participating in codegen. /// /// These unused functions don't need to be codegenned, but we do need to add them to the function /// coverage map (in a single designated CGU) so that we still emit coverage mappings for them. @@ -354,75 +363,109 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) { assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu()); let tcx = cx.tcx; + let usage = prepare_usage_sets(tcx); + + let is_unused_fn = |def_id: LocalDefId| -> bool { + let def_id = def_id.to_def_id(); + + // To be eligible for "unused function" mappings, a definition must: + // - Be function-like + // - Not participate directly in codegen (or have lost all its coverage statements) + // - Not have any coverage statements inlined into codegenned functions + tcx.def_kind(def_id).is_fn_like() + && (!usage.all_mono_items.contains(&def_id) + || usage.missing_own_coverage.contains(&def_id)) + && !usage.used_via_inlining.contains(&def_id) + }; - let eligible_def_ids = tcx.mir_keys(()).iter().filter_map(|local_def_id| { - let def_id = local_def_id.to_def_id(); - let kind = tcx.def_kind(def_id); - // `mir_keys` will give us `DefId`s for all kinds of things, not - // just "functions", like consts, statics, etc. Filter those out. - if !matches!(kind, DefKind::Fn | DefKind::AssocFn | DefKind::Closure) { - return None; - } + // Scan for unused functions that were instrumented for coverage. + for def_id in tcx.mir_keys(()).iter().copied().filter(|&def_id| is_unused_fn(def_id)) { + // Get the coverage info from MIR, skipping functions that were never instrumented. + let body = tcx.optimized_mir(def_id); + let Some(function_coverage_info) = body.function_coverage_info.as_deref() else { continue }; // FIXME(79651): Consider trying to filter out dummy instantiations of // unused generic functions from library crates, because they can produce // "unused instantiation" in coverage reports even when they are actually // used by some downstream crate in the same binary. 
- Some(local_def_id.to_def_id()) - }); - - let codegenned_def_ids = codegenned_and_inlined_items(tcx); - - // For each `DefId` that should have coverage instrumentation but wasn't - // codegenned, add it to the function coverage map as an unused function. - for def_id in eligible_def_ids.filter(|id| !codegenned_def_ids.contains(id)) { - // Skip any function that didn't have coverage data added to it by the - // coverage instrumentor. - let body = tcx.instance_mir(ty::InstanceDef::Item(def_id)); - let Some(function_coverage_info) = body.function_coverage_info.as_deref() else { - continue; - }; - debug!("generating unused fn: {def_id:?}"); - let instance = declare_unused_fn(tcx, def_id); - add_unused_function_coverage(cx, instance, function_coverage_info); + add_unused_function_coverage(cx, def_id, function_coverage_info); } } -/// All items participating in code generation together with (instrumented) -/// items inlined into them. -fn codegenned_and_inlined_items(tcx: TyCtxt<'_>) -> DefIdSet { - let (items, cgus) = tcx.collect_and_partition_mono_items(()); - let mut visited = DefIdSet::default(); - let mut result = items.clone(); - - for cgu in cgus { - for item in cgu.items().keys() { - if let mir::mono::MonoItem::Fn(ref instance) = item { - let did = instance.def_id(); - if !visited.insert(did) { - continue; - } - let body = tcx.instance_mir(instance.def); - for block in body.basic_blocks.iter() { - for statement in &block.statements { - let mir::StatementKind::Coverage(_) = statement.kind else { continue }; - let scope = statement.source_info.scope; - if let Some(inlined) = scope.inlined_instance(&body.source_scopes) { - result.insert(inlined.def_id()); - } - } - } } } + +struct UsageSets<'tcx> { + all_mono_items: &'tcx DefIdSet, + used_via_inlining: FxHashSet<DefId>, + missing_own_coverage: FxHashSet<DefId>, +} + +/// Prepare sets of definitions that are relevant to deciding whether something +/// is an "unused function" for coverage purposes. +fn prepare_usage_sets<'tcx>(tcx: TyCtxt<'tcx>) -> UsageSets<'tcx> { + let (all_mono_items, cgus) = tcx.collect_and_partition_mono_items(()); + + // Obtain a MIR body for each function participating in codegen, via an + // arbitrary instance. + let mut def_ids_seen = FxHashSet::default(); + let def_and_mir_for_all_mono_fns = cgus + .iter() + .flat_map(|cgu| cgu.items().keys()) + .filter_map(|item| match item { + mir::mono::MonoItem::Fn(instance) => Some(instance), + mir::mono::MonoItem::Static(_) | mir::mono::MonoItem::GlobalAsm(_) => None, + }) + // We only need one arbitrary instance per definition. + .filter(move |instance| def_ids_seen.insert(instance.def_id())) + .map(|instance| { + // We don't care about the instance, just its underlying MIR. + let body = tcx.instance_mir(instance.def); + (instance.def_id(), body) + }); + + // Functions whose coverage statements were found inlined into other functions. + let mut used_via_inlining = FxHashSet::default(); + // Functions that were instrumented, but had all of their coverage statements + // removed by later MIR transforms (e.g. UnreachablePropagation). + let mut missing_own_coverage = FxHashSet::default(); + + for (def_id, body) in def_and_mir_for_all_mono_fns { + let mut saw_own_coverage = false; + + // Inspect every coverage statement in the function's MIR.
+ for stmt in body + .basic_blocks + .iter() + .flat_map(|block| &block.statements) + .filter(|stmt| matches!(stmt.kind, mir::StatementKind::Coverage(_))) + { + if let Some(inlined) = stmt.source_info.scope.inlined_instance(&body.source_scopes) { + // This coverage statement was inlined from another function. + used_via_inlining.insert(inlined.def_id()); + } else { + // Non-inlined coverage statements belong to the enclosing function. + saw_own_coverage = true; } } + + if !saw_own_coverage && body.function_coverage_info.is_some() { + missing_own_coverage.insert(def_id); + } } - result + UsageSets { all_mono_items, used_via_inlining, missing_own_coverage } } -fn declare_unused_fn<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::Instance<'tcx> { - ty::Instance::new( +fn add_unused_function_coverage<'tcx>( + cx: &CodegenCx<'_, 'tcx>, + def_id: LocalDefId, + function_coverage_info: &'tcx mir::coverage::FunctionCoverageInfo, +) { + let tcx = cx.tcx; + let def_id = def_id.to_def_id(); + + // Make a dummy instance that fills in all generics with placeholders. + let instance = ty::Instance::new( def_id, ty::GenericArgs::for_item(tcx, def_id, |param, _| { if let ty::GenericParamDefKind::Lifetime = param.kind { @@ -431,14 +474,8 @@ fn declare_unused_fn<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::Instance<'tc tcx.mk_param_from_def(param) } }), - ) -} + ); -fn add_unused_function_coverage<'tcx>( - cx: &CodegenCx<'_, 'tcx>, - instance: ty::Instance<'tcx>, - function_coverage_info: &'tcx mir::coverage::FunctionCoverageInfo, -) { // An unused function's mappings will automatically be rewritten to map to // zero, because none of its counters/expressions are marked as seen. let function_coverage = FunctionCoverageCollector::unused(instance, function_coverage_info); diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 54f4bc06340..140566e8da9 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -14,9 +14,9 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_llvm::RustString; use rustc_middle::bug; use rustc_middle::mir::coverage::CoverageKind; -use rustc_middle::mir::Coverage; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::Instance; +use rustc_target::abi::Align; use std::cell::RefCell; @@ -24,8 +24,6 @@ pub(crate) mod ffi; pub(crate) mod map_data; pub mod mapgen; -const VAR_ALIGN_BYTES: usize = 8; - /// A context object for maintaining all state needed by the coverageinfo module. pub struct CrateCoverageContext<'ll, 'tcx> { /// Coverage data for each instrumented function identified by DefId. @@ -75,7 +73,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { #[instrument(level = "debug", skip(self))] - fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) { + fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) { // Our caller should have already taken care of inlining subtleties, // so we can assume that counter/expression IDs in this coverage // statement are meaningful for the given instance. @@ -98,7 +96,6 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { .entry(instance) .or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info)); - let Coverage { kind } = coverage; match *kind { CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. 
} => unreachable!( "marker statement {kind:?} should have been removed by CleanupPostBorrowck" @@ -227,7 +224,8 @@ pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>( llvm::set_global_constant(llglobal, true); llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::set_section(llglobal, &covmap_section_name); - llvm::set_alignment(llglobal, VAR_ALIGN_BYTES); + // LLVM's coverage mapping format specifies 8-byte alignment for items in this section. + llvm::set_alignment(llglobal, Align::EIGHT); cx.add_used_global(llglobal); } @@ -257,7 +255,8 @@ pub(crate) fn save_func_record_to_mod<'ll, 'tcx>( llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage); llvm::set_visibility(llglobal, llvm::Visibility::Hidden); llvm::set_section(llglobal, covfun_section_name); - llvm::set_alignment(llglobal, VAR_ALIGN_BYTES); + // LLVM's coverage mapping format specifies 8-byte alignment for items in this section. + llvm::set_alignment(llglobal, Align::EIGHT); llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name); cx.add_used_global(llglobal); } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 5782b156335..e5fecddec52 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -452,7 +452,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id), ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id), ty::Foreign(..) => build_foreign_type_di_node(cx, t, unique_type_id), - ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => { + ty::RawPtr(pointee_type, _) | ty::Ref(_, pointee_type, _) => { build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id) } // Some `Box` are newtyped pointers, make debuginfo aware of that. 
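The two coverageinfo hunks above replace the untyped `VAR_ALIGN_BYTES: usize = 8` constant with `Align::EIGHT` when aligning the covmap/covfun globals, per the 8-byte alignment that LLVM's coverage mapping format specifies for those sections. As a rough illustration of what that requirement means for record placement (a sketch only; in the compiler the alignment is applied through `llvm::set_alignment`, and `align_up` here is a hypothetical helper, not a rustc or LLVM API):

    const COVMAP_ALIGN: usize = 8;

    /// Round `n` up to the next multiple of `align` (which must be a power of two).
    fn align_up(n: usize, align: usize) -> usize {
        assert!(align.is_power_of_two());
        (n + align - 1) & !(align - 1)
    }

    fn main() {
        // A 13-byte record is padded to 16 bytes so that the next record
        // in the section still starts on an 8-byte boundary.
        assert_eq!(align_up(13, COVMAP_ALIGN), 16);
        assert_eq!(align_up(16, COVMAP_ALIGN), 16);
        assert_eq!(align_up(0, COVMAP_ALIGN), 0);
    }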
@@ -554,13 +554,16 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> ) -> &'ll DIFile { debug!(?source_file.name); - use rustc_session::RemapFileNameExt; + let filename_display_preference = + cx.sess().filename_display_preference(RemapPathScopeComponents::DEBUGINFO); + + use rustc_session::config::RemapPathScopeComponents; let (directory, file_name) = match &source_file.name { FileName::Real(filename) => { let working_directory = &cx.sess().opts.working_dir; debug!(?working_directory); - if cx.sess().should_prefer_remapped_for_codegen() { + if filename_display_preference == FileNameDisplayPreference::Remapped { let filename = cx .sess() .source_map() @@ -623,7 +626,7 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> } other => { debug!(?other); - ("".into(), other.for_codegen(cx.sess()).to_string_lossy().into_owned()) + ("".into(), other.display(filename_display_preference).to_string()) } }; @@ -832,9 +835,11 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( codegen_unit_name: &str, debug_context: &CodegenUnitDebugContext<'ll, 'tcx>, ) -> &'ll DIDescriptor { + use rustc_session::{config::RemapPathScopeComponents, RemapFileNameExt}; let mut name_in_debuginfo = tcx .sess .local_crate_source_file() + .map(|src| src.for_scope(&tcx.sess, RemapPathScopeComponents::DEBUGINFO).to_path_buf()) .unwrap_or_else(|| PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str())); // To avoid breaking split DWARF, we need to ensure that each codegen unit @@ -862,30 +867,29 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice. let producer = format!("clang LLVM ({rustc_producer})"); - use rustc_session::RemapFileNameExt; let name_in_debuginfo = name_in_debuginfo.to_string_lossy(); - let work_dir = tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy(); + let work_dir = tcx + .sess + .opts + .working_dir + .for_scope(tcx.sess, RemapPathScopeComponents::DEBUGINFO) + .to_string_lossy(); let output_filenames = tcx.output_filenames(()); - let split_name = if tcx.sess.target_can_use_split_dwarf() { - output_filenames - .split_dwarf_path( - tcx.sess.split_debuginfo(), - tcx.sess.opts.unstable_opts.split_dwarf_kind, - Some(codegen_unit_name), - ) - // We get a path relative to the working directory from split_dwarf_path - .map(|f| { - if tcx.sess.should_prefer_remapped_for_split_debuginfo_paths() { - tcx.sess.source_map().path_mapping().map_prefix(f).0 - } else { - f.into() - } - }) + let split_name = if tcx.sess.target_can_use_split_dwarf() + && let Some(f) = output_filenames.split_dwarf_path( + tcx.sess.split_debuginfo(), + tcx.sess.opts.unstable_opts.split_dwarf_kind, + Some(codegen_unit_name), + ) { + // We get a path relative to the working directory from split_dwarf_path + Some(tcx.sess.source_map().path_mapping().to_real_filename(f)) } else { None - } - .unwrap_or_default(); - let split_name = split_name.to_str().unwrap(); + }; + let split_name = split_name + .as_ref() + .map(|f| f.for_scope(tcx.sess, RemapPathScopeComponents::DEBUGINFO).to_string_lossy()) + .unwrap_or_default(); let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo); let dwarf_version = diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs index 4792b0798df..4edef14422e 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs +++ 
b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs @@ -683,7 +683,8 @@ fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>( _ => unreachable!(), }; - let coroutine_layout = cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap(); + let coroutine_layout = + cx.tcx.coroutine_layout(coroutine_def_id, coroutine_args.kind_ty()).unwrap(); let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(coroutine_def_id); let variant_range = coroutine_args.variant_range(coroutine_def_id, cx.tcx); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index 3dbe820b8ff..115d5187eaf 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -135,7 +135,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( unique_type_id: UniqueTypeId<'tcx>, ) -> DINodeCreationResult<'ll> { let coroutine_type = unique_type_id.expect_ty(); - let &ty::Coroutine(coroutine_def_id, _) = coroutine_type.kind() else { + let &ty::Coroutine(coroutine_def_id, coroutine_args) = coroutine_type.kind() else { bug!("build_coroutine_di_node() called with non-coroutine type: `{:?}`", coroutine_type) }; @@ -158,8 +158,10 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( DIFlags::FlagZero, ), |cx, coroutine_type_di_node| { - let coroutine_layout = - cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap(); + let coroutine_layout = cx + .tcx + .coroutine_layout(coroutine_def_id, coroutine_args.as_coroutine().kind_ty()) + .unwrap(); let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. } = coroutine_type_and_layout.variants diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index d3a851b40c0..4fdaa59e0e5 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -110,7 +110,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { .unstable_opts .dwarf_version .unwrap_or(sess.target.default_dwarf_version); - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( self.llmod, llvm::LLVMModFlagBehavior::Warning, c"Dwarf Version".as_ptr().cast(), @@ -118,7 +118,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { ); } else { // Indicate that we want CodeView debug information on MSVC - llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( self.llmod, llvm::LLVMModFlagBehavior::Warning, c"CodeView".as_ptr().cast(), @@ -127,7 +127,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { } // Prevent bitcode readers from deleting the debug info. 
- llvm::LLVMRustAddModuleFlag( + llvm::LLVMRustAddModuleFlagU32( self.llmod, llvm::LLVMModFlagBehavior::Warning, c"Debug Info Version".as_ptr().cast(), diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 78c0725a637..7117c4a0ed9 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -18,12 +18,11 @@ use crate::llvm; use crate::llvm::AttributePlace::Function; use crate::type_::Type; use crate::value::Value; +use itertools::Itertools; use rustc_codegen_ssa::traits::TypeMembershipMethods; +use rustc_data_structures::fx::FxIndexSet; use rustc_middle::ty::{Instance, Ty}; -use rustc_symbol_mangling::typeid::{ - kcfi_typeid_for_fnabi, kcfi_typeid_for_instance, typeid_for_fnabi, typeid_for_instance, - TypeIdOptions, -}; +use rustc_sanitizers::{cfi, kcfi}; use smallvec::SmallVec; /// Declare a function. @@ -141,51 +140,51 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { if self.tcx.sess.is_sanitizer_cfi_enabled() { if let Some(instance) = instance { - let typeid = typeid_for_instance(self.tcx, &instance, TypeIdOptions::empty()); - self.set_type_metadata(llfn, typeid); - let typeid = - typeid_for_instance(self.tcx, &instance, TypeIdOptions::GENERALIZE_POINTERS); - self.add_type_metadata(llfn, typeid); - let typeid = - typeid_for_instance(self.tcx, &instance, TypeIdOptions::NORMALIZE_INTEGERS); - self.add_type_metadata(llfn, typeid); - let typeid = typeid_for_instance( - self.tcx, - &instance, - TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS, - ); - self.add_type_metadata(llfn, typeid); + let mut typeids = FxIndexSet::default(); + for options in [ + cfi::TypeIdOptions::GENERALIZE_POINTERS, + cfi::TypeIdOptions::NORMALIZE_INTEGERS, + cfi::TypeIdOptions::USE_CONCRETE_SELF, + ] + .into_iter() + .powerset() + .map(cfi::TypeIdOptions::from_iter) + { + let typeid = cfi::typeid_for_instance(self.tcx, instance, options); + if typeids.insert(typeid.clone()) { + self.add_type_metadata(llfn, typeid); + } + } } else { - let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::empty()); - self.set_type_metadata(llfn, typeid); - let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::GENERALIZE_POINTERS); - self.add_type_metadata(llfn, typeid); - let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::NORMALIZE_INTEGERS); - self.add_type_metadata(llfn, typeid); - let typeid = typeid_for_fnabi( - self.tcx, - fn_abi, - TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS, - ); - self.add_type_metadata(llfn, typeid); + for options in [ + cfi::TypeIdOptions::GENERALIZE_POINTERS, + cfi::TypeIdOptions::NORMALIZE_INTEGERS, + ] + .into_iter() + .powerset() + .map(cfi::TypeIdOptions::from_iter) + { + let typeid = cfi::typeid_for_fnabi(self.tcx, fn_abi, options); + self.add_type_metadata(llfn, typeid); + } } } if self.tcx.sess.is_sanitizer_kcfi_enabled() { // LLVM KCFI does not support multiple !kcfi_type attachments - let mut options = TypeIdOptions::empty(); + let mut options = kcfi::TypeIdOptions::empty(); if self.tcx.sess.is_sanitizer_cfi_generalize_pointers_enabled() { - options.insert(TypeIdOptions::GENERALIZE_POINTERS); + options.insert(kcfi::TypeIdOptions::GENERALIZE_POINTERS); } if self.tcx.sess.is_sanitizer_cfi_normalize_integers_enabled() { - options.insert(TypeIdOptions::NORMALIZE_INTEGERS); + options.insert(kcfi::TypeIdOptions::NORMALIZE_INTEGERS); } if let Some(instance) = instance { - let kcfi_typeid = kcfi_typeid_for_instance(self.tcx, &instance, 
options); + let kcfi_typeid = kcfi::typeid_for_instance(self.tcx, instance, options); self.set_kcfi_type_metadata(llfn, kcfi_typeid); } else { - let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options); + let kcfi_typeid = kcfi::typeid_for_fnabi(self.tcx, fn_abi, options); self.set_kcfi_type_metadata(llfn, kcfi_typeid); } } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 71b69a94e99..2bed7c1bd1c 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -181,6 +181,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { simple_fn, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None, + Some(instance), ) } sym::likely => { @@ -263,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { llvm::LLVMSetAlignment(load, align); } if !result.layout.is_zst() { - self.store(load, result.llval, result.align); + self.store_to_place(load, result.val); } return Ok(()); } @@ -427,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { sym::black_box => { args[0].val.store(self, result); - let result_val_span = [result.llval]; + let result_val_span = [result.val.llval]; // We need to "use" the argument in some way LLVM can't introspect, and on // targets that support it we can typically leverage inline assembly to do // this. LLVM's interpretation of inline assembly is that it's, well, a black @@ -481,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { if !fn_abi.ret.is_ignore() { if let PassMode::Cast { .. } = &fn_abi.ret.mode { - self.store(llval, result.llval, result.align); + self.store(llval, result.val.llval, result.val.align); } else { OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) .val @@ -539,7 +540,7 @@ fn catch_unwind_intrinsic<'ll>( ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); - bx.call(try_func_ty, None, None, try_func, &[data], None); + bx.call(try_func_ty, None, None, try_func, &[data], None, None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. let ret_align = bx.tcx().data_layout.i32_align.abi; @@ -640,7 +641,7 @@ fn codegen_msvc_try<'ll>( let ptr_align = bx.tcx().data_layout.pointer_align.abi; let slot = bx.alloca(bx.type_ptr(), ptr_align); let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); - bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None); + bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None); bx.switch_to_block(normal); bx.ret(bx.const_i32(0)); @@ -684,7 +685,7 @@ fn codegen_msvc_try<'ll>( let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]); let ptr = bx.load(bx.type_ptr(), slot, ptr_align); let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void()); - bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet)); + bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None); bx.catch_ret(&funclet, caught); // The flag value of 64 indicates a "catch-all". 
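In the declare.rs hunk above, the four hand-written `typeid_for_instance`/`typeid_for_fnabi` calls are collapsed into a loop over the powerset of the CFI options, with an `FxIndexSet` deduplicating type ids because different option combinations can produce the same id. Here is a self-contained sketch of that pattern, assuming the `itertools` crate (which the hunk already imports) and substituting plain bit masks and a std `HashSet` for the rustc-internal `TypeIdOptions` bitflags and `FxIndexSet`:

    use itertools::Itertools;
    use std::collections::HashSet;

    const GENERALIZE_POINTERS: u8 = 1 << 0;
    const NORMALIZE_INTEGERS: u8 = 1 << 1;
    const USE_CONCRETE_SELF: u8 = 1 << 2;

    fn main() {
        let mut seen = HashSet::new();
        for options in [GENERALIZE_POINTERS, NORMALIZE_INTEGERS, USE_CONCRETE_SELF]
            .into_iter()
            .powerset() // every subset of the flags, starting with the empty set
            .map(|subset| subset.into_iter().fold(0u8, |acc, flag| acc | flag))
        {
            // Stand-in for `cfi::typeid_for_instance`; in rustc, distinct
            // option sets may yield the same type id, hence the dedup.
            let typeid = format!("typeid({options:03b})");
            if seen.insert(typeid.clone()) {
                println!("add_type_metadata(llfn, {typeid})");
            }
        }
        // Three independent flags give 2^3 = 8 option combinations.
        assert_eq!(seen.len(), 8);
    }

Deduplicating before attaching metadata keeps `add_type_metadata` from emitting the same `!type` annotation twice when two option sets collapse to one id.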
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 71b69a94e99..2bed7c1bd1c 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -181,6 +181,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     simple_fn,
                     &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                     None,
+                    Some(instance),
                 )
             }
             sym::likely => {
@@ -263,7 +264,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                         llvm::LLVMSetAlignment(load, align);
                     }
                     if !result.layout.is_zst() {
-                        self.store(load, result.llval, result.align);
+                        self.store_to_place(load, result.val);
                     }
                     return Ok(());
                 }
@@ -427,7 +428,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             sym::black_box => {
                 args[0].val.store(self, result);
-                let result_val_span = [result.llval];
+                let result_val_span = [result.val.llval];
                 // We need to "use" the argument in some way LLVM can't introspect, and on
                 // targets that support it we can typically leverage inline assembly to do
                 // this. LLVM's interpretation of inline assembly is that it's, well, a black
@@ -481,7 +482,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         if !fn_abi.ret.is_ignore() {
             if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                self.store(llval, result.llval, result.align);
+                self.store(llval, result.val.llval, result.val.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -539,7 +540,7 @@ fn catch_unwind_intrinsic<'ll>(
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.call(try_func_ty, None, None, try_func, &[data], None);
+        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
         let ret_align = bx.tcx().data_layout.i32_align.abi;
@@ -640,7 +641,7 @@ fn codegen_msvc_try<'ll>(
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let slot = bx.alloca(bx.type_ptr(), ptr_align);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
 
         bx.switch_to_block(normal);
         bx.ret(bx.const_i32(0));
@@ -684,7 +685,7 @@ fn codegen_msvc_try<'ll>(
         let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
         let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
         bx.catch_ret(&funclet, caught);
 
         // The flag value of 64 indicates a "catch-all".
@@ -692,7 +693,7 @@ fn codegen_msvc_try<'ll>(
         let flags = bx.const_i32(64);
         let null = bx.const_null(bx.type_ptr());
         let funclet = bx.catch_pad(cs, &[null, flags, null]);
-        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet));
+        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
         bx.catch_ret(&funclet, caught);
 
         bx.switch_to_block(caught);
@@ -701,7 +702,7 @@ fn codegen_msvc_try<'ll>(
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -750,7 +751,7 @@ fn codegen_wasm_try<'ll>(
         //   }
         //
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
 
         bx.switch_to_block(normal);
         bx.ret(bx.const_i32(0));
@@ -766,7 +767,7 @@ fn codegen_wasm_try<'ll>(
         let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
 
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
         bx.catch_ret(&funclet, caught);
 
         bx.switch_to_block(caught);
@@ -775,7 +776,7 @@ fn codegen_wasm_try<'ll>(
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -818,7 +819,7 @@ fn codegen_gnu_try<'ll>(
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -836,13 +837,13 @@ fn codegen_gnu_try<'ll>(
         bx.add_clause(vals, tydesc);
         let ptr = bx.extract_value(vals, 0);
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None);
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
         bx.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -882,7 +883,7 @@ fn codegen_emcc_try<'ll>(
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -920,13 +921,13 @@ fn codegen_emcc_try<'ll>(
         bx.store(is_rust_panic, catch_data_1, i8_align);
 
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None);
+        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
         bx.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -1064,7 +1065,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                     let place = PlaceRef::alloca(bx, args[0].layout);
                     args[0].val.store(bx, place);
                     let int_ty = bx.type_ix(expected_bytes * 8);
-                    bx.load(int_ty, place.llval, Align::ONE)
+                    bx.load(int_ty, place.val.llval, Align::ONE)
                 }
                 _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                     span,
@@ -1439,6 +1440,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
             None,
+            None,
        );
        Ok(c)
    }
@@ -1483,7 +1485,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
             ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
-            ty::RawPtr(_) => format!("v{}p0", vec_len),
+            ty::RawPtr(_, _) => format!("v{}p0", vec_len),
             _ => unreachable!(),
         }
     }
@@ -1493,7 +1495,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             ty::Int(v) => cx.type_int_from_ty(v),
             ty::Uint(v) => cx.type_uint_from_ty(v),
             ty::Float(v) => cx.type_float_from_ty(v),
-            ty::RawPtr(_) => cx.type_ptr(),
+            ty::RawPtr(_, _) => cx.type_ptr(),
             _ => unreachable!(),
         };
         cx.type_vector(elem_ty, vec_len)
@@ -1548,8 +1550,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         require!(
             matches!(
-                element_ty1.kind(),
-                ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+                *element_ty1.kind(),
+                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1607,6 +1609,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[1].immediate(), alignment, mask, args[0].immediate()],
             None,
+            None,
         );
         return Ok(v);
     }
@@ -1654,8 +1657,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         require!(
             matches!(
-                pointer_ty.kind(),
-                ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind()
+                *pointer_ty.kind(),
+                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1706,6 +1709,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[1].immediate(), alignment, mask, args[2].immediate()],
             None,
+            None,
         );
         return Ok(v);
     }
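
The ty::RawPtr pattern rewrites above and below reflect a representation change: TyKind::RawPtr now carries the pointee type and the mutability as two tuple fields instead of one TypeAndMut struct, so matches bind both parts directly. A self-contained sketch of the new matching style, with stand-in types rather than rustc's definitions:

// Stand-ins mirroring the shape of the change; not rustc's actual types.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Mutability {
    Not,
    Mut,
}

impl Mutability {
    fn is_mut(self) -> bool {
        matches!(self, Mutability::Mut)
    }
}

// Before: RawPtr(TypeAndMut { ty, mutbl }). After: RawPtr(ty, mutbl).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum TyKind {
    Int,
    RawPtr(&'static TyKind, Mutability),
}

fn main() {
    let ptr = TyKind::RawPtr(&TyKind::Int, Mutability::Mut);
    // The two-field form binds pointee and mutability in one pattern, as in
    // the `ty::RawPtr(p_ty, p_mutbl) if ... && p_mutbl.is_mut()` guards above.
    assert!(matches!(
        ptr,
        TyKind::RawPtr(p_ty, p_mutbl) if *p_ty == TyKind::Int && p_mutbl.is_mut()
    ));
    println!("matched a mutable raw pointer");
}
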
@@ -1746,8 +1750,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // The second argument must be a mutable pointer type matching the element type
         require!(
             matches!(
-                pointer_ty.kind(),
-                ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind() && p.mutbl.is_mut()
+                *pointer_ty.kind(),
+                ty::RawPtr(p_ty, p_mutbl) if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1799,6 +1803,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[2].immediate(), args[1].immediate(), alignment, mask],
             None,
+            None,
         );
         return Ok(v);
     }
@@ -1843,9 +1848,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         require!(
             matches!(
-                element_ty1.kind(),
-                ty::RawPtr(p)
-                    if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+                *element_ty1.kind(),
+                ty::RawPtr(p_ty, p_mutbl)
+                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1904,6 +1909,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[0].immediate(), args[1].immediate(), alignment, mask],
             None,
+            None,
         );
         return Ok(v);
     }
@@ -2074,8 +2080,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         );
 
         match in_elem.kind() {
-            ty::RawPtr(p) => {
-                let metadata = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+            ty::RawPtr(p_ty, _) => {
+                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
                 require!(
@@ -2088,8 +2094,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         }
         match out_elem.kind() {
-            ty::RawPtr(p) => {
-                let metadata = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+            ty::RawPtr(p_ty, _) => {
+                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
                 require!(
@@ -2105,7 +2111,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         return Ok(args[0].immediate());
     }
 
-    if name == sym::simd_expose_addr {
+    if name == sym::simd_expose_provenance {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
@@ -2120,7 +2126,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         );
 
         match in_elem.kind() {
-            ty::RawPtr(_) => {}
+            ty::RawPtr(_, _) => {}
             _ => {
                 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
             }
@@ -2133,7 +2139,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
     }
 
-    if name == sym::simd_from_exposed_addr {
+    if name == sym::simd_with_exposed_provenance {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
@@ -2152,7 +2158,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
         }
         match out_elem.kind() {
-            ty::RawPtr(_) => {}
+            ty::RawPtr(_, _) => {}
             _ => {
                 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
             }
@@ -2352,11 +2358,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 f,
                 &[args[0].immediate(), bx.const_int(bx.type_i1(), 0)],
                 None,
+                None,
             ))
         } else {
             let fn_ty = bx.type_func(&[vec_ty], vec_ty);
             let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None))
+            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None, None))
         };
     }
 
@@ -2409,7 +2416,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
         let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-        let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None);
+        let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None, None);
         return Ok(v);
     }
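
The simd_expose_addr -> simd_expose_provenance and simd_from_exposed_addr -> simd_with_exposed_provenance renames track the strict-provenance naming used by the standard library's pointer APIs. A scalar (non-SIMD) sketch of the same expose/reconstruct round-trip via std; treat the stable method names as an assumption, since they require a newer toolchain than the one this diff targets (at the time they were the unstable expose_addr/from_exposed_addr):

fn main() {
    let x = 42u32;
    let p: *const u32 = &x;
    // Expose the pointer's provenance and take its address as a plain usize.
    let addr: usize = p.expose_provenance();
    // Rebuild a pointer from the exposed address.
    let q: *const u32 = std::ptr::with_exposed_provenance(addr);
    // Safety: q points at x, whose provenance was exposed above.
    assert_eq!(unsafe { *q }, 42);
}
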
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 284bc74d5c4..5509baaa3e9 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1801,12 +1801,21 @@ extern "C" {
     ///
     /// In order for Rust-C LTO to work, module flags must be compatible with Clang. What
     /// "compatible" means depends on the merge behaviors involved.
-    pub fn LLVMRustAddModuleFlag(
+    pub fn LLVMRustAddModuleFlagU32(
         M: &Module,
         merge_behavior: LLVMModFlagBehavior,
         name: *const c_char,
         value: u32,
     );
+
+    pub fn LLVMRustAddModuleFlagString(
+        M: &Module,
+        merge_behavior: LLVMModFlagBehavior,
+        name: *const c_char,
+        value: *const c_char,
+        value_len: size_t,
+    );
+
     pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool;
 
     pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>;
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index 4f5cc575da6..6ab1eea9597 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -11,6 +11,7 @@ pub use self::RealPredicate::*;
 use libc::c_uint;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_llvm::RustString;
+use rustc_target::abi::Align;
 use std::cell::RefCell;
 use std::ffi::{CStr, CString};
 use std::str::FromStr;
@@ -229,9 +230,9 @@ pub fn set_visibility(llglobal: &Value, visibility: Visibility) {
     }
 }
 
-pub fn set_alignment(llglobal: &Value, bytes: usize) {
+pub fn set_alignment(llglobal: &Value, align: Align) {
     unsafe {
-        ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
+        ffi::LLVMSetAlignment(llglobal, align.bytes() as c_uint);
     }
 }
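
Changing set_alignment to take rustc_target::abi::Align instead of a raw usize byte count makes invalid alignments unrepresentable at the call site, and the conversion to LLVM's c_uint happens in exactly one place. A minimal standalone sketch of the idea, with a hypothetical Align stand-in rather than rustc's actual type:

// Hypothetical stand-in for rustc_target::abi::Align: stores the log2 of the
// alignment, so only power-of-two alignments can exist.
#[derive(Clone, Copy, Debug)]
struct Align {
    pow2: u8,
}

impl Align {
    fn from_bytes(bytes: u64) -> Result<Align, String> {
        if bytes.is_power_of_two() {
            Ok(Align { pow2: bytes.trailing_zeros() as u8 })
        } else {
            Err(format!("alignment {bytes} is not a power of two"))
        }
    }

    fn bytes(self) -> u64 {
        1u64 << self.pow2
    }
}

// The typed parameter rules out passing a log2 exponent (or 0) as "bytes".
fn set_alignment(global: &str, align: Align) {
    println!("set alignment of {global} to {} bytes", align.bytes());
}

fn main() {
    let align = Align::from_bytes(16).expect("16 is a valid alignment");
    set_alignment("@my_global", align);
    assert!(Align::from_bytes(24).is_err());
}
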
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index af1bbda4d08..a00f09dc40d 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -281,9 +281,6 @@ impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
         ty.llvm_type(self)
     }
-    fn scalar_copy_backend_type(&self, layout: TyAndLayout<'tcx>) -> Option<Self::Type> {
-        layout.scalar_copy_llvm_type(self)
-    }
 }
 
 impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> {
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index d10a083765b..40ed6baa610 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -5,7 +5,6 @@ use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
-use rustc_target::abi::HasDataLayout;
 use rustc_target::abi::{Abi, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F128, F16, F32, F64};
 use rustc_target::abi::{Scalar, Size, Variants};
@@ -166,7 +165,6 @@ pub trait LayoutLlvmExt<'tcx> {
         index: usize,
         immediate: bool,
     ) -> &'a Type;
-    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
 }
 
 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
@@ -308,44 +306,4 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
 
         self.scalar_llvm_type_at(cx, scalar)
     }
-
-    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
-        debug_assert!(self.is_sized());
-
-        // FIXME: this is a fairly arbitrary choice, but 128 bits on WASM
-        // (matching the 128-bit SIMD types proposal) and 256 bits on x64
-        // (like AVX2 registers) seems at least like a tolerable starting point.
-        let threshold = cx.data_layout().pointer_size * 4;
-        if self.layout.size() > threshold {
-            return None;
-        }
-
-        // Vectors, even for non-power-of-two sizes, have the same layout as
-        // arrays but don't count as aggregate types
-        // While LLVM theoretically supports non-power-of-two sizes, and they
-        // often work fine, sometimes x86-isel deals with them horribly
-        // (see #115212) so for now only use power-of-two ones.
-        if let FieldsShape::Array { count, .. } = self.layout.fields()
-            && count.is_power_of_two()
-            && let element = self.field(cx, 0)
-            && element.ty.is_integral()
-        {
-            // `cx.type_ix(bits)` is tempting here, but while that works great
-            // for things that *stay* as memory-to-memory copies, it also ends
-            // up suppressing vectorization as it introduces shifts when it
-            // extracts all the individual values.
-
-            let ety = element.llvm_type(cx);
-            if *count == 1 {
-                // Emitting `<1 x T>` would be silly; just use the scalar.
-                return Some(ety);
-            } else {
-                return Some(cx.type_vector(ety, *count));
-            }
-        }
-
-        // FIXME: The above only handled integer arrays; surely more things
-        // would also be possible. Be careful about provenance, though!
-        None
-    }
 }
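
For reference, the heuristic deleted from type_of.rs along with its trait hook chose a <count x iN> vector type for copying small power-of-two integer arrays and fell back to ordinary memory copies for everything else. The selection logic can be expressed independently of LLVM; a standalone sketch with illustrative stand-in types, not rustc's API:

// Stand-in for an LLVM type handle.
#[derive(Debug, PartialEq)]
enum LlvmType {
    Int { bits: u64 },
    Vector { elem_bits: u64, count: u64 },
}

// Mirrors the removed scalar_copy_llvm_type logic: only small, power-of-two
// integer arrays get a vector type; everything else returns None.
fn scalar_copy_type(
    elem_bits: u64,
    count: u64,
    elem_is_int: bool,
    pointer_size_bits: u64,
) -> Option<LlvmType> {
    // 128 bits on wasm32, 256 bits on x86-64.
    let threshold = pointer_size_bits * 4;
    if !elem_is_int || !count.is_power_of_two() || elem_bits * count > threshold {
        return None;
    }
    if count == 1 {
        // Emitting `<1 x T>` would be silly; just use the scalar.
        Some(LlvmType::Int { bits: elem_bits })
    } else {
        Some(LlvmType::Vector { elem_bits, count })
    }
}

fn main() {
    // [u8; 16] on x86-64 would have been copied as <16 x i8>.
    assert_eq!(
        scalar_copy_type(8, 16, true, 64),
        Some(LlvmType::Vector { elem_bits: 8, count: 16 })
    );
    // [u8; 3] is not a power-of-two array, so no vector type.
    assert_eq!(scalar_copy_type(8, 3, true, 64), None);
}
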
