Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
23 files changed, 499 insertions, 400 deletions
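A note on the abi.rs hunk that follows: the rewritten `store` keeps the long-standing spill-and-memcpy strategy for `PassMode::Cast`. As a standalone illustration of that strategy (my own example, not the compiler's code), storing an ABI scalar such as `u64` into a `{u32, u32}` destination writes the scalar to scratch storage and then copies only the destination's size in bytes:

use std::mem::MaybeUninit;

#[repr(C)]
#[derive(Debug, Clone, Copy)]
struct Pair {
    a: u32,
    b: u32,
}

// Spill the ABI scalar to scratch storage with the scalar's own layout, then
// copy only as many bytes as the destination type actually occupies.
fn store_cast_scalar(val: u64, dst: &mut MaybeUninit<Pair>) {
    let scratch: u64 = val;
    unsafe {
        std::ptr::copy_nonoverlapping(
            (&scratch as *const u64).cast::<u8>(),
            dst.as_mut_ptr().cast::<u8>(),
            std::mem::size_of::<Pair>(),
        );
    }
}

fn main() {
    let mut dst = MaybeUninit::<Pair>::uninit();
    store_cast_scalar(0x0000_0002_0000_0001, &mut dst);
    // On a little-endian target this prints `Pair { a: 1, b: 2 }`.
    println!("{:?}", unsafe { dst.assume_init() });
}

The byte-wise copy sidesteps both the oversized-store hazard and the strict-aliasing problem spelled out in the comments below.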
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index b5b4f894e4d..e5f5146fac8 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -203,57 +203,63 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { val: &'ll Value, dst: PlaceRef<'tcx, &'ll Value>, ) { - if self.is_ignore() { - return; - } - if self.is_sized_indirect() { - OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst) - } else if self.is_unsized_indirect() { - bug!("unsized `ArgAbi` must be handled through `store_fn_arg`"); - } else if let PassMode::Cast { cast, pad_i32: _ } = &self.mode { - // FIXME(eddyb): Figure out when the simpler Store is safe, clang - // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. - let can_store_through_cast_ptr = false; - if can_store_through_cast_ptr { - bx.store(val, dst.llval, self.layout.align.abi); - } else { - // The actual return type is a struct, but the ABI - // adaptation code has cast it into some scalar type. The - // code that follows is the only reliable way I have - // found to do a transform like i64 -> {i32,i32}. - // Basically we dump the data onto the stack then memcpy it. - // - // Other approaches I tried: - // - Casting rust ret pointer to the foreign type and using Store - // is (a) unsafe if size of foreign type > size of rust type and - // (b) runs afoul of strict aliasing rules, yielding invalid - // assembly under -O (specifically, the store gets removed). - // - Truncating foreign type to correct integral type and then - // bitcasting to the struct type yields invalid cast errors. - - // We instead thus allocate some scratch space... - let scratch_size = cast.size(bx); - let scratch_align = cast.align(bx); - let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); - bx.lifetime_start(llscratch, scratch_size); - - // ... where we first store the value... - bx.store(val, llscratch, scratch_align); - - // ... and then memcpy it to the intended destination. - bx.memcpy( - dst.llval, - self.layout.align.abi, - llscratch, - scratch_align, - bx.const_usize(self.layout.size.bytes()), - MemFlags::empty(), - ); + match &self.mode { + PassMode::Ignore => {} + // Sized indirect arguments + PassMode::Indirect { attrs, meta_attrs: None, on_stack: _ } => { + let align = attrs.pointee_align.unwrap_or(self.layout.align.abi); + OperandValue::Ref(val, None, align).store(bx, dst); + } + // Unsized indirect arguments + PassMode::Indirect { attrs: _, meta_attrs: Some(_), on_stack: _ } => { + bug!("unsized `ArgAbi` must be handled through `store_fn_arg`"); + } + PassMode::Cast { cast, pad_i32: _ } => { + // FIXME(eddyb): Figure out when the simpler Store is safe, clang + // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. + let can_store_through_cast_ptr = false; + if can_store_through_cast_ptr { + bx.store(val, dst.llval, self.layout.align.abi); + } else { + // The actual return type is a struct, but the ABI + // adaptation code has cast it into some scalar type. The + // code that follows is the only reliable way I have + // found to do a transform like i64 -> {i32,i32}. + // Basically we dump the data onto the stack then memcpy it. + // + // Other approaches I tried: + // - Casting rust ret pointer to the foreign type and using Store + // is (a) unsafe if size of foreign type > size of rust type and + // (b) runs afoul of strict aliasing rules, yielding invalid + // assembly under -O (specifically, the store gets removed). 
+ // - Truncating foreign type to correct integral type and then + // bitcasting to the struct type yields invalid cast errors. + + // We instead thus allocate some scratch space... + let scratch_size = cast.size(bx); + let scratch_align = cast.align(bx); + let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); + bx.lifetime_start(llscratch, scratch_size); + + // ... where we first store the value... + bx.store(val, llscratch, scratch_align); + + // ... and then memcpy it to the intended destination. + bx.memcpy( + dst.llval, + self.layout.align.abi, + llscratch, + scratch_align, + bx.const_usize(self.layout.size.bytes()), + MemFlags::empty(), + ); - bx.lifetime_end(llscratch, scratch_size); + bx.lifetime_end(llscratch, scratch_size); + } + } + _ => { + OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst); } - } else { - OperandRef::from_immediate_or_packed_pair(bx, val, self.layout).val.store(bx, dst); } } @@ -424,7 +430,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Indirect { attrs, meta_attrs: _, on_stack } => { assert!(!on_stack); let i = apply(attrs); - let sret = llvm::CreateStructRetAttr(cx.llcx, self.ret.layout.llvm_type(cx)); + let sret = llvm::CreateStructRetAttr( + cx.llcx, + cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()), + ); attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]); } PassMode::Cast { cast, pad_i32: _ } => { @@ -437,7 +446,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Ignore => {} PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => { let i = apply(attrs); - let byval = llvm::CreateByValAttr(cx.llcx, arg.layout.llvm_type(cx)); + let byval = llvm::CreateByValAttr( + cx.llcx, + cx.type_array(cx.type_i8(), arg.layout.size.bytes()), + ); attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]); } PassMode::Direct(attrs) @@ -486,7 +498,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Indirect { attrs, meta_attrs: _, on_stack } => { assert!(!on_stack); let i = apply(bx.cx, attrs); - let sret = llvm::CreateStructRetAttr(bx.cx.llcx, self.ret.layout.llvm_type(bx)); + let sret = llvm::CreateStructRetAttr( + bx.cx.llcx, + bx.cx.type_array(bx.cx.type_i8(), self.ret.layout.size.bytes()), + ); attributes::apply_to_callsite(callsite, llvm::AttributePlace::Argument(i), &[sret]); } PassMode::Cast { cast, pad_i32: _ } => { @@ -513,7 +528,10 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Ignore => {} PassMode::Indirect { attrs, meta_attrs: None, on_stack: true } => { let i = apply(bx.cx, attrs); - let byval = llvm::CreateByValAttr(bx.cx.llcx, arg.layout.llvm_type(bx)); + let byval = llvm::CreateByValAttr( + bx.cx.llcx, + bx.cx.type_array(bx.cx.type_i8(), arg.layout.size.bytes()), + ); attributes::apply_to_callsite( callsite, llvm::AttributePlace::Argument(i), diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 74539d4d495..500904ce188 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -466,11 +466,11 @@ pub(crate) fn inline_asm_call<'ll>( let call = if !labels.is_empty() { assert!(catch_funclet.is_none()); - bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None) + bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None) } else if let Some((catch, funclet)) = catch_funclet { - bx.invoke(fty, None, None, v, inputs, 
dest.unwrap(), catch, funclet) + bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None) } else { - bx.call(fty, None, None, v, inputs, None) + bx.call(fty, None, None, v, inputs, None, None) }; // Store mark in a metadata node so we can map LLVM errors diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index f9eaa0d94cb..870e5ab3296 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -417,7 +417,7 @@ pub fn from_fn_attrs<'ll, 'tcx>( to_add.push(llvm::CreateAttrString(cx.llcx, "cmse_nonsecure_entry")); } if let Some(align) = codegen_fn_attrs.alignment { - llvm::set_alignment(llfn, align as usize); + llvm::set_alignment(llfn, align); } to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize)); diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 031bbd63361..4efea66a7f1 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -29,7 +29,8 @@ use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{DiagCtxt, FatalError, Level}; use rustc_fs_util::{link_or_copy, path_to_c_string}; use rustc_middle::ty::TyCtxt; -use rustc_session::config::{self, Lto, OutputType, Passes, SplitDwarfKind, SwitchWithOptPath}; +use rustc_session::config::{self, Lto, OutputType, Passes}; +use rustc_session::config::{RemapPathScopeComponents, SplitDwarfKind, SwitchWithOptPath}; use rustc_session::Session; use rustc_span::symbol::sym; use rustc_span::InnerSpan; @@ -257,18 +258,17 @@ pub fn target_machine_factory( }; let debuginfo_compression = SmallCStr::new(&debuginfo_compression); - let should_prefer_remapped_for_split_debuginfo_paths = - sess.should_prefer_remapped_for_split_debuginfo_paths(); + let file_name_display_preference = + sess.filename_display_preference(RemapPathScopeComponents::DEBUGINFO); Arc::new(move |config: TargetMachineFactoryConfig| { let path_to_cstring_helper = |path: Option<PathBuf>| -> CString { let path = path.unwrap_or_default(); - let path = if should_prefer_remapped_for_split_debuginfo_paths { - path_mapping.map_prefix(path).0 - } else { - path.into() - }; - CString::new(path.to_str().unwrap()).unwrap() + let path = path_mapping + .to_real_filename(path) + .to_string_lossy(file_name_display_preference) + .into_owned(); + CString::new(path).unwrap() }; let split_dwarf_file = path_to_cstring_helper(config.split_dwarf_file); diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index ca2e2b57580..1a32958d362 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -19,9 +19,13 @@ use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{ FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout, }; -use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; +use rustc_session::config::OptLevel; use rustc_span::Span; -use rustc_symbol_mangling::typeid::{kcfi_typeid_for_fnabi, typeid_for_fnabi, TypeIdOptions}; +use rustc_symbol_mangling::typeid::{ + kcfi_typeid_for_fnabi, kcfi_typeid_for_instance, typeid_for_fnabi, typeid_for_instance, + TypeIdOptions, +}; use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange}; use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target}; use smallvec::SmallVec; @@ -221,6 +225,7 @@ 
impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { then: &'ll BasicBlock, catch: &'ll BasicBlock, funclet: Option<&Funclet<'ll>>, + instance: Option<Instance<'tcx>>, ) -> &'ll Value { debug!("invoke {:?} with args ({:?})", llfn, args); @@ -233,10 +238,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } // Emit CFI pointer type membership test - self.cfi_type_test(fn_attrs, fn_abi, llfn); + self.cfi_type_test(fn_attrs, fn_abi, instance, llfn); // Emit KCFI operand bundle - let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn); + let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw); if let Some(kcfi_bundle) = kcfi_bundle { bundles.push(kcfi_bundle); @@ -547,6 +552,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { layout: TyAndLayout<'tcx>, offset: Size, ) { + if bx.cx.sess().opts.optimize == OptLevel::No { + // Don't emit metadata we're not going to use + return; + } + if !scalar.is_uninit_valid() { bx.noundef_metadata(load); } @@ -663,6 +673,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { return; } + if self.cx.sess().opts.optimize == OptLevel::No { + // Don't emit metadata we're not going to use + return; + } + unsafe { let llty = self.cx.val_ty(load); let v = [ @@ -1132,9 +1147,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { &mut self, op: rustc_codegen_ssa::common::AtomicRmwBinOp, dst: &'ll Value, - src: &'ll Value, + mut src: &'ll Value, order: rustc_codegen_ssa::common::AtomicOrdering, ) -> &'ll Value { + // LLVM's `atomicrmw` only accepts pointer operands for `xchg`; all other + // RMW operations require the pointer to be cast to an integer first. + if self.val_ty(src) == self.type_ptr() + && op != rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg + { + src = self.ptrtoint(src, self.type_isize()); + } unsafe { llvm::LLVMBuildAtomicRMW( self.llbuilder, @@ -1225,6 +1246,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llfn: &'ll Value, args: &[&'ll Value], funclet: Option<&Funclet<'ll>>, + instance: Option<Instance<'tcx>>, ) -> &'ll Value { debug!("call {:?} with args ({:?})", llfn, args); @@ -1237,10 +1259,10 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } // Emit CFI pointer type membership test - self.cfi_type_test(fn_attrs, fn_abi, llfn); + self.cfi_type_test(fn_attrs, fn_abi, instance, llfn); // Emit KCFI operand bundle - let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn); + let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw); if let Some(kcfi_bundle) = kcfi_bundle { bundles.push(kcfi_bundle); @@ -1462,7 +1484,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value { let (ty, f) = self.cx.get_intrinsic(intrinsic); - self.call(ty, None, None, f, args, None) + self.call(ty, None, None, f, args, None, None) } fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { @@ -1520,7 +1542,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { format!("llvm.{instr}.sat.i{int_width}.f{float_width}") }; let f = self.declare_cfn(&name, llvm::UnnamedAddr::No, self.type_func(&[src_ty], dest_ty)); - self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None) + self.call(self.type_func(&[src_ty], dest_ty), None, None, f, &[val], None, None) }
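The `atomic_rmw` hunk above casts pointer operands with `ptrtoint` because LLVM's `atomicrmw` only takes pointers for `xchg`. A rough user-level analogue (my own example, not the builder's code) performs the RMW on the pointer's integer bits instead:

use std::sync::atomic::{AtomicUsize, Ordering};

// An `or` RMW on a pointer is performed on its integer representation,
// mirroring the ptrtoint cast in the builder change above.
fn fetch_or_tag(slot: &AtomicUsize, tag: usize) -> *mut u8 {
    slot.fetch_or(tag, Ordering::SeqCst) as *mut u8
}

fn main() {
    static DATA: u8 = 0;
    let slot = AtomicUsize::new(&DATA as *const u8 as usize);
    println!("previous pointer bits: {:p}", fetch_or_tag(&slot, 1));
}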
pub(crate) fn landing_pad( @@ -1548,6 +1570,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { default_dest: &'ll BasicBlock, indirect_dest: &[&'ll BasicBlock], funclet: Option<&Funclet<'ll>>, + instance: Option<Instance<'tcx>>, ) -> &'ll Value { debug!("invoke {:?} with args ({:?})", llfn, args); @@ -1560,10 +1583,10 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } // Emit CFI pointer type membership test - self.cfi_type_test(fn_attrs, fn_abi, llfn); + self.cfi_type_test(fn_attrs, fn_abi, instance, llfn); // Emit KCFI operand bundle - let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, llfn); + let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw); if let Some(kcfi_bundle) = kcfi_bundle { bundles.push(kcfi_bundle); @@ -1595,6 +1618,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { &mut self, fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, + instance: Option<Instance<'tcx>>, llfn: &'ll Value, ) { let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) }; @@ -1616,7 +1640,11 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { options.insert(TypeIdOptions::NORMALIZE_INTEGERS); } - let typeid = typeid_for_fnabi(self.tcx, fn_abi, options); + let typeid = if let Some(instance) = instance { + typeid_for_instance(self.tcx, instance, options) + } else { + typeid_for_fnabi(self.tcx, fn_abi, options) + }; let typeid_metadata = self.cx.typeid_metadata(typeid).unwrap(); // Test whether the function pointer is associated with the type identifier. @@ -1638,6 +1666,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { &mut self, fn_attrs: Option<&CodegenFnAttrs>, fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>, + instance: Option<Instance<'tcx>>, llfn: &'ll Value, ) -> Option<llvm::OperandBundleDef<'ll>> { let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) }; @@ -1659,7 +1688,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { options.insert(TypeIdOptions::NORMALIZE_INTEGERS); } - let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options); + let kcfi_typeid = if let Some(instance) = instance { + kcfi_typeid_for_instance(self.tcx, instance, options) + } else { + kcfi_typeid_for_fnabi(self.tcx, fn_abi, options) + }; + Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)])) } else { None diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 8173e41aff4..568fcc3f3cf 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -95,11 +95,13 @@ impl<'ll> BackendTypes for CodegenCx<'ll, '_> { impl<'ll> CodegenCx<'ll, '_> { pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint) } + let len = u64::try_from(elts.len()).expect("LLVMConstArray2 elements len overflow"); + unsafe { llvm::LLVMConstArray2(ty, elts.as_ptr(), len) } } pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { - unsafe { llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint) } + let len = c_uint::try_from(elts.len()).expect("LLVMConstVector elements len overflow"); + unsafe { llvm::LLVMConstVector(elts.as_ptr(), len) } } pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { @@ -108,8 +110,8 @@ impl<'ll> CodegenCx<'ll, '_> { pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { unsafe { - assert_eq!(idx as c_uint as u64, idx); - let r = 
llvm::LLVMGetAggregateElement(v, idx as c_uint).unwrap(); + let idx = c_uint::try_from(idx).expect("LLVMGetAggregateElement index overflow"); + let r = llvm::LLVMGetAggregateElement(v, idx).unwrap(); debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r); @@ -158,6 +160,10 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_int(self.type_i32(), i as i64) } + fn const_i8(&self, i: i8) -> &'ll Value { + self.const_int(self.type_i8(), i as i64) + } + fn const_u32(&self, i: u32) -> &'ll Value { self.const_uint(self.type_i32(), i as u64) } @@ -329,7 +335,7 @@ pub fn val_ty(v: &Value) -> &Type { pub fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { unsafe { let ptr = bytes.as_ptr() as *const c_char; - llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True) + llvm::LLVMConstStringInContext2(llcx, ptr, bytes.len(), True) } } @@ -338,9 +344,8 @@ pub fn struct_in_context<'ll>( elts: &[&'ll Value], packed: bool, ) -> &'ll Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, packed as Bool) - } + let len = c_uint::try_from(elts.len()).expect("LLVMConstStructInContext elements len overflow"); + unsafe { llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), len, packed as Bool) } } #[inline] diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index ec2fb2c6e54..4afa230e598 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -9,6 +9,7 @@ use crate::type_::Type; use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_codegen_ssa::traits::*; +use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; use rustc_middle::mir::interpret::{ @@ -17,7 +18,7 @@ use rustc_middle::mir::interpret::{ }; use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::layout::LayoutOf; -use rustc_middle::ty::{self, Instance, Ty}; +use rustc_middle::ty::{self, Instance}; use rustc_middle::{bug, span_bug}; use rustc_session::config::Lto; use rustc_target::abi::{ @@ -114,7 +115,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation< cx.const_struct(&llvals, true) } -pub fn codegen_static_initializer<'ll, 'tcx>( +fn codegen_static_initializer<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, def_id: DefId, ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> { @@ -147,11 +148,10 @@ fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: fn check_and_apply_linkage<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, attrs: &CodegenFnAttrs, - ty: Ty<'tcx>, + llty: &'ll Type, sym: &str, def_id: DefId, ) -> &'ll Value { - let llty = cx.layout_of(ty).llvm_type(cx); if let Some(linkage) = attrs.import_linkage { debug!("get_static: sym={} linkage={:?}", sym, linkage); @@ -226,9 +226,28 @@ impl<'ll> CodegenCx<'ll, '_> { } } + #[instrument(level = "debug", skip(self))] pub(crate) fn get_static(&self, def_id: DefId) -> &'ll Value { let instance = Instance::mono(self.tcx, def_id); - if let Some(&g) = self.instances.borrow().get(&instance) { + trace!(?instance); + + let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() }; + // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out + // the llvm type from the actual evaluated initializer. 
+ let llty = if nested { + self.type_i8() + } else { + let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); + trace!(?ty); + self.layout_of(ty).llvm_type(self) + }; + self.get_static_inner(def_id, llty) + } + + #[instrument(level = "debug", skip(self, llty))] + pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value { + if let Some(&g) = self.instances.borrow().get(&Instance::mono(self.tcx, def_id)) { + trace!("used cached value"); return g; } @@ -240,14 +259,12 @@ impl<'ll> CodegenCx<'ll, '_> { statics defined in the same CGU, but did not for `{def_id:?}`" ); - let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); - let sym = self.tcx.symbol_name(instance).name; + let sym = self.tcx.symbol_name(Instance::mono(self.tcx, def_id)).name; let fn_attrs = self.tcx.codegen_fn_attrs(def_id); - debug!("get_static: sym={} instance={:?} fn_attrs={:?}", sym, instance, fn_attrs); + debug!(?sym, ?fn_attrs); let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) { - let llty = self.layout_of(ty).llvm_type(self); if let Some(g) = self.get_declared_value(sym) { if self.val_ty(g) != self.type_ptr() { span_bug!(self.tcx.def_span(def_id), "Conflicting types for static"); @@ -264,7 +281,7 @@ impl<'ll> CodegenCx<'ll, '_> { g } else { - check_and_apply_linkage(self, fn_attrs, ty, sym, def_id) + check_and_apply_linkage(self, fn_attrs, llty, sym, def_id) }; // Thread-local statics in some other crate need to *always* be linked @@ -332,34 +349,18 @@ impl<'ll> CodegenCx<'ll, '_> { } } - self.instances.borrow_mut().insert(instance, g); + self.instances.borrow_mut().insert(Instance::mono(self.tcx, def_id), g); g } -} - -impl<'ll> StaticMethods for CodegenCx<'ll, '_> { - fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value { - if let Some(&gv) = self.const_globals.borrow().get(&cv) { - unsafe { - // Upgrade the alignment in cases where the same constant is used with different - // alignment requirements - let llalign = align.bytes() as u32; - if llalign > llvm::LLVMGetAlignment(gv) { - llvm::LLVMSetAlignment(gv, llalign); - } - } - return gv; - } - let gv = self.static_addr_of_mut(cv, align, kind); - unsafe { - llvm::LLVMSetGlobalConstant(gv, True); - } - self.const_globals.borrow_mut().insert(cv, gv); - gv - } - fn codegen_static(&self, def_id: DefId, is_mutable: bool) { + fn codegen_static_item(&self, def_id: DefId) { unsafe { + assert!( + llvm::LLVMGetInitializer( + self.instances.borrow().get(&Instance::mono(self.tcx, def_id)).unwrap() + ) + .is_none() + ); let attrs = self.tcx.codegen_fn_attrs(def_id); let Ok((v, alloc)) = codegen_static_initializer(self, def_id) else { @@ -368,13 +369,11 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { }; let alloc = alloc.inner(); - let g = self.get_static(def_id); - let val_llty = self.val_ty(v); - let instance = Instance::mono(self.tcx, def_id); - let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all()); - let llty = self.layout_of(ty).llvm_type(self); + let g = self.get_static_inner(def_id, val_llty); + let llty = self.val_ty(g); + let g = if val_llty == llty { g } else { @@ -409,16 +408,15 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { self.statics_to_rauw.borrow_mut().push((g, new_g)); new_g }; - set_global_alignment(self, g, self.align_of(ty)); + set_global_alignment(self, g, alloc.align); llvm::LLVMSetInitializer(g, v); if self.should_assume_dso_local(g, true) { llvm::LLVMRustSetDSOLocal(g, true); } - // As an optimization, all shared statics which do not have interior - // 
mutability are placed into read-only memory. - if !is_mutable && self.type_is_freeze(ty) { + // Forward the allocation's mutability (picked by the const interner) to LLVM. + if alloc.mutability.is_not() { llvm::LLVMSetGlobalConstant(g, llvm::True); } @@ -541,6 +539,32 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> { } } } +} + +impl<'ll> StaticMethods for CodegenCx<'ll, '_> { + fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value { + if let Some(&gv) = self.const_globals.borrow().get(&cv) { + unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + let llalign = align.bytes() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); + } + } + return gv; + } + let gv = self.static_addr_of_mut(cv, align, kind); + unsafe { + llvm::LLVMSetGlobalConstant(gv, True); + } + self.const_globals.borrow_mut().insert(cv, gv); + gv + } + + fn codegen_static(&self, def_id: DefId) { + self.codegen_static_item(def_id) + } /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr. fn add_used_global(&self, global: &'ll Value) { diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index d1f32087908..df9f066e58a 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -27,9 +27,7 @@ use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet}; use rustc_session::Session; use rustc_span::source_map::Spanned; use rustc_span::Span; -use rustc_target::abi::{ - call::FnAbi, HasDataLayout, PointeeInfo, Size, TargetDataLayout, VariantIdx, -}; +use rustc_target::abi::{call::FnAbi, HasDataLayout, TargetDataLayout, VariantIdx}; use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel}; use smallvec::SmallVec; @@ -77,13 +75,12 @@ pub struct CodegenCx<'ll, 'tcx> { /// See <https://llvm.org/docs/LangRef.html#the-llvm-compiler-used-global-variable> for details pub compiler_used_statics: RefCell<Vec<&'ll Value>>, - /// Mapping of non-scalar types to llvm types and field remapping if needed. - pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), TypeLowering<'ll>>>, + /// Mapping of non-scalar types to llvm types. + pub type_lowering: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>, /// Mapping of scalar types to llvm types. pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>, - pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>, pub isize_ty: &'ll Type, pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'ll, 'tcx>>, @@ -105,15 +102,6 @@ pub struct CodegenCx<'ll, 'tcx> { pub renamed_statics: RefCell<FxHashMap<DefId, &'ll Value>>, } -pub struct TypeLowering<'ll> { - /// Associated LLVM type - pub lltype: &'ll Type, - - /// If padding is used the slice maps fields from source order - /// to llvm order. 
- pub field_remapping: Option<SmallVec<[u32; 4]>>, -} - fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { match tls_model { TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic, @@ -135,17 +123,6 @@ pub unsafe fn create_module<'ll>( let mut target_data_layout = sess.target.data_layout.to_string(); let llvm_version = llvm_util::get_version(); - if llvm_version < (17, 0, 0) { - if sess.target.arch.starts_with("powerpc") { - // LLVM 17 specifies function pointer alignment for ppc: - // https://reviews.llvm.org/D147016 - target_data_layout = target_data_layout - .replace("-Fn32", "") - .replace("-Fi32", "") - .replace("-Fn64", "") - .replace("-Fi64", ""); - } - } if llvm_version < (18, 0, 0) { if sess.target.arch == "x86" || sess.target.arch == "x86_64" { // LLVM 18 adjusts i128 to be 128-bit aligned on x86 variants. @@ -335,6 +312,7 @@ pub unsafe fn create_module<'ll>( // // On the wasm targets it will get hooked up to the "producer" sections // `processed-by` information. + #[allow(clippy::option_env_unwrap)] let rustc_producer = format!("rustc version {}", option_env!("CFG_VERSION").expect("CFG_VERSION")); let name_metadata = llvm::LLVMMDStringInContext( @@ -469,7 +447,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { compiler_used_statics: RefCell::new(Vec::new()), type_lowering: Default::default(), scalar_lltypes: Default::default(), - pointee_infos: Default::default(), isize_ty, coverage_cx, dbg_cx, @@ -558,11 +535,12 @@ impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { let tcx = self.tcx; let llfn = match tcx.lang_items().eh_personality() { - Some(def_id) if name.is_none() => self.get_fn_addr( - ty::Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, ty::List::empty()) - .unwrap() - .unwrap(), - ), + Some(def_id) if name.is_none() => self.get_fn_addr(ty::Instance::expect_resolve( + tcx, + ty::ParamEnv::reveal_all(), + def_id, + ty::List::empty(), + )), _ => { let name = name.unwrap_or("rust_eh_personality"); if let Some(llfn) = self.get_declared_value(name) { diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs index 017843c7e7d..2af28146a51 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs @@ -164,6 +164,15 @@ impl CounterMappingRegion { end_line, end_col, ), + MappingKind::Branch { true_term, false_term } => Self::branch_region( + Counter::from_term(true_term), + Counter::from_term(false_term), + local_file_id, + start_line, + start_col, + end_line, + end_col, + ), } } @@ -188,9 +197,6 @@ impl CounterMappingRegion { } } - // This function might be used in the future; the LLVM API is still evolving, as is coverage - // support. 
- #[allow(dead_code)] pub(crate) fn branch_region( counter: Counter, false_counter: Counter, diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index c45787f35aa..278db21b0a1 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -6,9 +6,8 @@ use crate::llvm; use itertools::Itertools as _; use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods}; -use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; -use rustc_hir::def::DefKind; -use rustc_hir::def_id::DefId; +use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet}; +use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_index::IndexVec; use rustc_middle::bug; use rustc_middle::mir; @@ -174,8 +173,14 @@ impl GlobalFileTable { // Since rustc generates coverage maps with relative paths, the // compilation directory can be combined with the relative paths // to get absolute paths, if needed. + use rustc_session::config::RemapPathScopeComponents; use rustc_session::RemapFileNameExt; - let working_dir: &str = &tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy(); + let working_dir: &str = &tcx + .sess + .opts + .working_dir + .for_scope(tcx.sess, RemapPathScopeComponents::MACRO) + .to_string_lossy(); llvm::build_byte_buffer(|buffer| { coverageinfo::write_filenames_section_to_buffer( @@ -335,16 +340,9 @@ fn save_function_record( ); } -/// When finalizing the coverage map, `FunctionCoverage` only has the `CodeRegion`s and counters for -/// the functions that went through codegen; such as public functions and "used" functions -/// (functions referenced by other "used" or public items). Any other functions considered unused, -/// or "Unreachable", were still parsed and processed through the MIR stage, but were not -/// codegenned. (Note that `-Clink-dead-code` can force some unused code to be codegenned, but -/// that flag is known to cause other errors, when combined with `-C instrument-coverage`; and -/// `-Clink-dead-code` will not generate code for unused generic functions.) -/// -/// We can find the unused functions (including generic functions) by the set difference of all MIR -/// `DefId`s (`tcx` query `mir_keys`) minus the codegenned `DefId`s (`codegenned_and_inlined_items`). +/// Each CGU will normally only emit coverage metadata for the functions that it actually generates. +/// But since we don't want unused functions to disappear from coverage reports, we also scan for +/// functions that were instrumented but are not participating in codegen. /// /// These unused functions don't need to be codegenned, but we do need to add them to the function /// coverage map (in a single designated CGU) so that we still emit coverage mappings for them. 
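The hunk that follows replaces the old set-difference approach with a three-set predicate. A minimal self-contained sketch of that predicate, using the same set names as the new code but with `DefId`s reduced to plain integers for illustration:

use std::collections::HashSet;

struct UsageSets {
    all_mono_items: HashSet<u32>,
    used_via_inlining: HashSet<u32>,
    missing_own_coverage: HashSet<u32>,
}

// A definition is an "unused function" for coverage purposes when it is
// function-like, does not participate in codegen (or lost all of its own
// coverage statements), and was never inlined into codegenned code.
fn is_unused_fn(usage: &UsageSets, def_id: u32, is_fn_like: bool) -> bool {
    is_fn_like
        && (!usage.all_mono_items.contains(&def_id)
            || usage.missing_own_coverage.contains(&def_id))
        && !usage.used_via_inlining.contains(&def_id)
}

fn main() {
    let usage = UsageSets {
        all_mono_items: HashSet::from([1]),
        used_via_inlining: HashSet::from([2]),
        missing_own_coverage: HashSet::new(),
    };
    assert!(!is_unused_fn(&usage, 1, true)); // codegenned
    assert!(!is_unused_fn(&usage, 2, true)); // reached via inlining
    assert!(is_unused_fn(&usage, 3, true)); // instrumented but never emitted
    println!("ok");
}

A function counts as unused only when all three conditions line up, which is exactly the shape of `is_unused_fn` in the hunk below.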
@@ -354,76 +352,109 @@ fn add_unused_functions(cx: &CodegenCx<'_, '_>) { assert!(cx.codegen_unit.is_code_coverage_dead_code_cgu()); let tcx = cx.tcx; + let usage = prepare_usage_sets(tcx); + + let is_unused_fn = |def_id: LocalDefId| -> bool { + let def_id = def_id.to_def_id(); + + // To be eligible for "unused function" mappings, a definition must: + // - Be function-like + // - Not participate directly in codegen (or have lost all its coverage statements) + // - Not have any coverage statements inlined into codegenned functions + tcx.def_kind(def_id).is_fn_like() + && (!usage.all_mono_items.contains(&def_id) + || usage.missing_own_coverage.contains(&def_id)) + && !usage.used_via_inlining.contains(&def_id) + }; - let ignore_unused_generics = tcx.sess.instrument_coverage_except_unused_generics(); - - let eligible_def_ids = tcx.mir_keys(()).iter().filter_map(|local_def_id| { - let def_id = local_def_id.to_def_id(); - let kind = tcx.def_kind(def_id); - // `mir_keys` will give us `DefId`s for all kinds of things, not - // just "functions", like consts, statics, etc. Filter those out. - // If `ignore_unused_generics` was specified, filter out any - // generic functions from consideration as well. - if !matches!(kind, DefKind::Fn | DefKind::AssocFn | DefKind::Closure) { - return None; - } - if ignore_unused_generics && tcx.generics_of(def_id).requires_monomorphization(tcx) { - return None; - } - Some(local_def_id.to_def_id()) - }); - - let codegenned_def_ids = codegenned_and_inlined_items(tcx); + // Scan for unused functions that were instrumented for coverage. + for def_id in tcx.mir_keys(()).iter().copied().filter(|&def_id| is_unused_fn(def_id)) { + // Get the coverage info from MIR, skipping functions that were never instrumented. + let body = tcx.optimized_mir(def_id); + let Some(function_coverage_info) = body.function_coverage_info.as_deref() else { continue }; - // For each `DefId` that should have coverage instrumentation but wasn't - // codegenned, add it to the function coverage map as an unused function. - for def_id in eligible_def_ids.filter(|id| !codegenned_def_ids.contains(id)) { - // Skip any function that didn't have coverage data added to it by the - // coverage instrumentor. - let body = tcx.instance_mir(ty::InstanceDef::Item(def_id)); - let Some(function_coverage_info) = body.function_coverage_info.as_deref() else { - continue; - }; + // FIXME(79651): Consider trying to filter out dummy instantiations of + // unused generic functions from library crates, because they can produce + // "unused instantiation" in coverage reports even when they are actually + // used by some downstream crate in the same binary. debug!("generating unused fn: {def_id:?}"); - let instance = declare_unused_fn(tcx, def_id); - add_unused_function_coverage(cx, instance, function_coverage_info); + add_unused_function_coverage(cx, def_id, function_coverage_info); } } -/// All items participating in code generation together with (instrumented) -/// items inlined into them. 
-fn codegenned_and_inlined_items(tcx: TyCtxt<'_>) -> DefIdSet { - let (items, cgus) = tcx.collect_and_partition_mono_items(()); - let mut visited = DefIdSet::default(); - let mut result = items.clone(); - - for cgu in cgus { - for item in cgu.items().keys() { - if let mir::mono::MonoItem::Fn(ref instance) = item { - let did = instance.def_id(); - if !visited.insert(did) { - continue; - } - let body = tcx.instance_mir(instance.def); - for block in body.basic_blocks.iter() { - for statement in &block.statements { - let mir::StatementKind::Coverage(_) = statement.kind else { continue }; - let scope = statement.source_info.scope; - if let Some(inlined) = scope.inlined_instance(&body.source_scopes) { - result.insert(inlined.def_id()); - } - } - } +struct UsageSets<'tcx> { + all_mono_items: &'tcx DefIdSet, + used_via_inlining: FxHashSet<DefId>, + missing_own_coverage: FxHashSet<DefId>, +} + +/// Prepare sets of definitions that are relevant to deciding whether something +/// is an "unused function" for coverage purposes. +fn prepare_usage_sets<'tcx>(tcx: TyCtxt<'tcx>) -> UsageSets<'tcx> { + let (all_mono_items, cgus) = tcx.collect_and_partition_mono_items(()); + + // Obtain a MIR body for each function participating in codegen, via an + // arbitrary instance. + let mut def_ids_seen = FxHashSet::default(); + let def_and_mir_for_all_mono_fns = cgus + .iter() + .flat_map(|cgu| cgu.items().keys()) + .filter_map(|item| match item { + mir::mono::MonoItem::Fn(instance) => Some(instance), + mir::mono::MonoItem::Static(_) | mir::mono::MonoItem::GlobalAsm(_) => None, + }) + // We only need one arbitrary instance per definition. + .filter(move |instance| def_ids_seen.insert(instance.def_id())) + .map(|instance| { + // We don't care about the instance, just its underlying MIR. + let body = tcx.instance_mir(instance.def); + (instance.def_id(), body) + }); + + // Functions whose coverage statements were found inlined into other functions. + let mut used_via_inlining = FxHashSet::default(); + // Functions that were instrumented, but had all of their coverage statements + // removed by later MIR transforms (e.g. UnreachablePropagation). + let mut missing_own_coverage = FxHashSet::default(); + + for (def_id, body) in def_and_mir_for_all_mono_fns { + let mut saw_own_coverage = false; + + // Inspect every coverage statement in the function's MIR. + for stmt in body + .basic_blocks + .iter() + .flat_map(|block| &block.statements) + .filter(|stmt| matches!(stmt.kind, mir::StatementKind::Coverage(_))) + { + if let Some(inlined) = stmt.source_info.scope.inlined_instance(&body.source_scopes) { + // This coverage statement was inlined from another function. + used_via_inlining.insert(inlined.def_id()); + } else { + // Non-inlined coverage statements belong to the enclosing function. + saw_own_coverage = true; + } + } + + if !saw_own_coverage && body.function_coverage_info.is_some() { + missing_own_coverage.insert(def_id); + } } - result + UsageSets { all_mono_items, used_via_inlining, missing_own_coverage } } -fn declare_unused_fn<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::Instance<'tcx> { - ty::Instance::new( +fn add_unused_function_coverage<'tcx>( + cx: &CodegenCx<'_, 'tcx>, + def_id: LocalDefId, + function_coverage_info: &'tcx mir::coverage::FunctionCoverageInfo, +) { + let tcx = cx.tcx; + let def_id = def_id.to_def_id(); + + // Make a dummy instance that fills in all generics with placeholders. 
+ let instance = ty::Instance::new( def_id, ty::GenericArgs::for_item(tcx, def_id, |param, _| { if let ty::GenericParamDefKind::Lifetime = param.kind { @@ -432,14 +463,8 @@ fn declare_unused_fn<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> ty::Instance<'tc tcx.mk_param_from_def(param) } }), - ) -} + ); -fn add_unused_function_coverage<'tcx>( - cx: &CodegenCx<'_, 'tcx>, - instance: ty::Instance<'tcx>, - function_coverage_info: &'tcx mir::coverage::FunctionCoverageInfo, -) { // An unused function's mappings will automatically be rewritten to map to // zero, because none of its counters/expressions are marked as seen. let function_coverage = FunctionCoverageCollector::unused(instance, function_coverage_info); diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 733a77d24c2..68c1770c1d4 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -14,9 +14,9 @@ use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_llvm::RustString; use rustc_middle::bug; use rustc_middle::mir::coverage::CoverageKind; -use rustc_middle::mir::Coverage; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::Instance; +use rustc_target::abi::Align; use std::cell::RefCell; @@ -24,7 +24,7 @@ pub(crate) mod ffi; pub(crate) mod map_data; pub mod mapgen; -const VAR_ALIGN_BYTES: usize = 8; +const VAR_ALIGN: Align = Align::EIGHT; /// A context object for maintaining all state needed by the coverageinfo module. pub struct CrateCoverageContext<'ll, 'tcx> { @@ -75,7 +75,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { #[instrument(level = "debug", skip(self))] - fn add_coverage(&mut self, instance: Instance<'tcx>, coverage: &Coverage) { + fn add_coverage(&mut self, instance: Instance<'tcx>, kind: &CoverageKind) { // Our caller should have already taken care of inlining subtleties, // so we can assume that counter/expression IDs in this coverage // statement are meaningful for the given instance. @@ -85,14 +85,6 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { let bx = self; - match coverage.kind { - // Marker statements have no effect during codegen, - // so return early and don't create `func_coverage`. - CoverageKind::SpanMarker => return, - // Match exhaustively to ensure that newly-added kinds are classified correctly. - CoverageKind::CounterIncrement { .. } | CoverageKind::ExpressionUsed { .. } => {} - } - let Some(function_coverage_info) = bx.tcx.instance_mir(instance.def).function_coverage_info.as_deref() else { @@ -106,10 +98,9 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { .entry(instance) .or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info)); - let Coverage { kind } = coverage; match *kind { - CoverageKind::SpanMarker => unreachable!( - "unexpected marker statement {kind:?} should have caused an early return" + CoverageKind::SpanMarker | CoverageKind::BlockMarker { .. 
} => unreachable!( + "marker statement {kind:?} should have been removed by CleanupPostBorrowck" ), CoverageKind::CounterIncrement { id } => { func_coverage.mark_counter_id_seen(id); @@ -235,7 +226,7 @@ pub(crate) fn save_cov_data_to_mod<'ll, 'tcx>( llvm::set_global_constant(llglobal, true); llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::set_section(llglobal, &covmap_section_name); - llvm::set_alignment(llglobal, VAR_ALIGN_BYTES); + llvm::set_alignment(llglobal, VAR_ALIGN); cx.add_used_global(llglobal); } @@ -265,7 +256,7 @@ pub(crate) fn save_func_record_to_mod<'ll, 'tcx>( llvm::set_linkage(llglobal, llvm::Linkage::LinkOnceODRLinkage); llvm::set_visibility(llglobal, llvm::Visibility::Hidden); llvm::set_section(llglobal, covfun_section_name); - llvm::set_alignment(llglobal, VAR_ALIGN_BYTES); + llvm::set_alignment(llglobal, VAR_ALIGN); llvm::set_comdat(cx.llmod, llglobal, &func_record_var_name); cx.add_used_global(llglobal); } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 660f1647367..e5fecddec52 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -26,6 +26,7 @@ use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind; use rustc_codegen_ssa::traits::*; use rustc_fs_util::path_to_c_string; use rustc_hir::def::CtorKind; +use rustc_hir::def::DefKind; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; @@ -451,7 +452,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D ty::Slice(_) | ty::Str => build_slice_type_di_node(cx, t, unique_type_id), ty::Dynamic(..) => build_dyn_type_di_node(cx, t, unique_type_id), ty::Foreign(..) => build_foreign_type_di_node(cx, t, unique_type_id), - ty::RawPtr(ty::TypeAndMut { ty: pointee_type, .. }) | ty::Ref(_, pointee_type, _) => { + ty::RawPtr(pointee_type, _) | ty::Ref(_, pointee_type, _) => { build_pointer_or_reference_di_node(cx, t, pointee_type, unique_type_id) } // Some `Box` are newtyped pointers, make debuginfo aware of that. 
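The `ty::RawPtr(pointee_type, _)` arm above reflects `RawPtr` now carrying its pointee and mutability as two tuple fields rather than a `TypeAndMut` struct. A toy mock of the new match shape (types are my own stand-ins, not rustc's):

#[derive(Debug)]
#[allow(dead_code)]
enum Mutability {
    Not,
    Mut,
}

#[derive(Debug)]
enum Ty {
    // Was roughly `RawPtr(TypeAndMut { ty, mutbl })`; the two fields are now
    // inlined into the variant itself.
    RawPtr(Box<Ty>, Mutability),
    Ref(Box<Ty>),
    Unit,
}

fn pointee(t: &Ty) -> Option<&Ty> {
    match t {
        // Both arms bind the pointee directly, as in the diff above.
        Ty::RawPtr(pointee_type, _) | Ty::Ref(pointee_type) => Some(&**pointee_type),
        Ty::Unit => None,
    }
}

fn main() {
    let ptr = Ty::RawPtr(Box::new(Ty::Unit), Mutability::Mut);
    println!("{:?}", pointee(&ptr));
}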
@@ -553,13 +554,16 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> ) -> &'ll DIFile { debug!(?source_file.name); - use rustc_session::RemapFileNameExt; + let filename_display_preference = + cx.sess().filename_display_preference(RemapPathScopeComponents::DEBUGINFO); + + use rustc_session::config::RemapPathScopeComponents; let (directory, file_name) = match &source_file.name { FileName::Real(filename) => { let working_directory = &cx.sess().opts.working_dir; debug!(?working_directory); - if cx.sess().should_prefer_remapped_for_codegen() { + if filename_display_preference == FileNameDisplayPreference::Remapped { let filename = cx .sess() .source_map() @@ -622,7 +626,7 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> } other => { debug!(?other); - ("".into(), other.for_codegen(cx.sess()).to_string_lossy().into_owned()) + ("".into(), other.display(filename_display_preference).to_string()) } }; @@ -831,9 +835,11 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( codegen_unit_name: &str, debug_context: &CodegenUnitDebugContext<'ll, 'tcx>, ) -> &'ll DIDescriptor { + use rustc_session::{config::RemapPathScopeComponents, RemapFileNameExt}; let mut name_in_debuginfo = tcx .sess .local_crate_source_file() + .map(|src| src.for_scope(&tcx.sess, RemapPathScopeComponents::DEBUGINFO).to_path_buf()) .unwrap_or_else(|| PathBuf::from(tcx.crate_name(LOCAL_CRATE).as_str())); // To avoid breaking split DWARF, we need to ensure that each codegen unit @@ -861,30 +867,29 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( // FIXME(#41252) Remove "clang LLVM" if we can get GDB and LLVM to play nice. let producer = format!("clang LLVM ({rustc_producer})"); - use rustc_session::RemapFileNameExt; let name_in_debuginfo = name_in_debuginfo.to_string_lossy(); - let work_dir = tcx.sess.opts.working_dir.for_codegen(tcx.sess).to_string_lossy(); + let work_dir = tcx + .sess + .opts + .working_dir + .for_scope(tcx.sess, RemapPathScopeComponents::DEBUGINFO) + .to_string_lossy(); let output_filenames = tcx.output_filenames(()); - let split_name = if tcx.sess.target_can_use_split_dwarf() { - output_filenames - .split_dwarf_path( - tcx.sess.split_debuginfo(), - tcx.sess.opts.unstable_opts.split_dwarf_kind, - Some(codegen_unit_name), - ) - // We get a path relative to the working directory from split_dwarf_path - .map(|f| { - if tcx.sess.should_prefer_remapped_for_split_debuginfo_paths() { - tcx.sess.source_map().path_mapping().map_prefix(f).0 - } else { - f.into() - } - }) + let split_name = if tcx.sess.target_can_use_split_dwarf() + && let Some(f) = output_filenames.split_dwarf_path( + tcx.sess.split_debuginfo(), + tcx.sess.opts.unstable_opts.split_dwarf_kind, + Some(codegen_unit_name), + ) { + // We get a path relative to the working directory from split_dwarf_path + Some(tcx.sess.source_map().path_mapping().to_real_filename(f)) } else { None - } - .unwrap_or_default(); - let split_name = split_name.to_str().unwrap(); + }; + let split_name = split_name + .as_ref() + .map(|f| f.for_scope(tcx.sess, RemapPathScopeComponents::DEBUGINFO).to_string_lossy()) + .unwrap_or_default(); let kind = DebugEmissionKind::from_generic(tcx.sess.opts.debuginfo); let dwarf_version = @@ -1309,6 +1314,11 @@ pub fn build_global_var_di_node<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId, glo }; let is_local_to_unit = is_node_local_to_unit(cx, def_id); + + let DefKind::Static { nested, .. 
} = cx.tcx.def_kind(def_id) else { bug!() }; + if nested { + return; + } let variable_type = Instance::mono(cx.tcx, def_id).ty(cx.tcx, ty::ParamEnv::reveal_all()); let type_di_node = type_di_node(cx, variable_type); let var_name = tcx.item_name(def_id); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs index 4792b0798df..4edef14422e 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs @@ -683,7 +683,8 @@ fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>( _ => unreachable!(), }; - let coroutine_layout = cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap(); + let coroutine_layout = + cx.tcx.coroutine_layout(coroutine_def_id, coroutine_args.kind_ty()).unwrap(); let common_upvar_names = cx.tcx.closure_saved_names_of_captured_variables(coroutine_def_id); let variant_range = coroutine_args.variant_range(coroutine_def_id, cx.tcx); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index 3dbe820b8ff..115d5187eaf 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -135,7 +135,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( unique_type_id: UniqueTypeId<'tcx>, ) -> DINodeCreationResult<'ll> { let coroutine_type = unique_type_id.expect_ty(); - let &ty::Coroutine(coroutine_def_id, _) = coroutine_type.kind() else { + let &ty::Coroutine(coroutine_def_id, coroutine_args) = coroutine_type.kind() else { bug!("build_coroutine_di_node() called with non-coroutine type: `{:?}`", coroutine_type) }; @@ -158,8 +158,10 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( DIFlags::FlagZero, ), |cx, coroutine_type_di_node| { - let coroutine_layout = - cx.tcx.optimized_mir(coroutine_def_id).coroutine_layout().unwrap(); + let coroutine_layout = cx + .tcx + .coroutine_layout(coroutine_def_id, coroutine_args.as_coroutine().kind_ty()) + .unwrap(); let Variants::Multiple { tag_encoding: TagEncoding::Direct, ref variants, .. 
} = coroutine_type_and_layout.variants diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 78c0725a637..3ef8538ced3 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -18,7 +18,9 @@ use crate::llvm; use crate::llvm::AttributePlace::Function; use crate::type_::Type; use crate::value::Value; +use itertools::Itertools; use rustc_codegen_ssa::traits::TypeMembershipMethods; +use rustc_data_structures::fx::FxIndexSet; use rustc_middle::ty::{Instance, Ty}; use rustc_symbol_mangling::typeid::{ kcfi_typeid_for_fnabi, kcfi_typeid_for_instance, typeid_for_fnabi, typeid_for_instance, @@ -141,33 +143,31 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { if self.tcx.sess.is_sanitizer_cfi_enabled() { if let Some(instance) = instance { - let typeid = typeid_for_instance(self.tcx, &instance, TypeIdOptions::empty()); - self.set_type_metadata(llfn, typeid); - let typeid = - typeid_for_instance(self.tcx, &instance, TypeIdOptions::GENERALIZE_POINTERS); - self.add_type_metadata(llfn, typeid); - let typeid = - typeid_for_instance(self.tcx, &instance, TypeIdOptions::NORMALIZE_INTEGERS); - self.add_type_metadata(llfn, typeid); - let typeid = typeid_for_instance( - self.tcx, - &instance, - TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS, - ); - self.add_type_metadata(llfn, typeid); + let mut typeids = FxIndexSet::default(); + for options in [ + TypeIdOptions::GENERALIZE_POINTERS, + TypeIdOptions::NORMALIZE_INTEGERS, + TypeIdOptions::NO_SELF_TYPE_ERASURE, + ] + .into_iter() + .powerset() + .map(TypeIdOptions::from_iter) + { + let typeid = typeid_for_instance(self.tcx, instance, options); + if typeids.insert(typeid.clone()) { + self.add_type_metadata(llfn, typeid); + } + } } else { - let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::empty()); - self.set_type_metadata(llfn, typeid); - let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::GENERALIZE_POINTERS); - self.add_type_metadata(llfn, typeid); - let typeid = typeid_for_fnabi(self.tcx, fn_abi, TypeIdOptions::NORMALIZE_INTEGERS); - self.add_type_metadata(llfn, typeid); - let typeid = typeid_for_fnabi( - self.tcx, - fn_abi, - TypeIdOptions::GENERALIZE_POINTERS | TypeIdOptions::NORMALIZE_INTEGERS, - ); - self.add_type_metadata(llfn, typeid); + for options in + [TypeIdOptions::GENERALIZE_POINTERS, TypeIdOptions::NORMALIZE_INTEGERS] + .into_iter() + .powerset() + .map(TypeIdOptions::from_iter) + { + let typeid = typeid_for_fnabi(self.tcx, fn_abi, options); + self.add_type_metadata(llfn, typeid); + } } } @@ -182,7 +182,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { } if let Some(instance) = instance { - let kcfi_typeid = kcfi_typeid_for_instance(self.tcx, &instance, options); + let kcfi_typeid = kcfi_typeid_for_instance(self.tcx, instance, options); self.set_kcfi_type_metadata(llfn, kcfi_typeid); } else { let kcfi_typeid = kcfi_typeid_for_fnabi(self.tcx, fn_abi, options); diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs index 5bef240340b..e15eda7c66c 100644 --- a/compiler/rustc_codegen_llvm/src/errors.rs +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -4,7 +4,7 @@ use std::path::Path; use crate::fluent_generated as fluent; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_errors::{Diag, DiagCtxt, EmissionGuarantee, IntoDiagnostic, Level}; +use rustc_errors::{Diag, DiagCtxt, Diagnostic, EmissionGuarantee, Level}; use rustc_macros::{Diagnostic, Subdiagnostic}; use 
rustc_span::Span; @@ -99,9 +99,9 @@ pub(crate) struct DynamicLinkingWithLTO; pub(crate) struct ParseTargetMachineConfig<'a>(pub LlvmError<'a>); -impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for ParseTargetMachineConfig<'_> { - fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> { - let diag: Diag<'_, G> = self.0.into_diagnostic(dcx, level); +impl<G: EmissionGuarantee> Diagnostic<'_, G> for ParseTargetMachineConfig<'_> { + fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> { + let diag: Diag<'_, G> = self.0.into_diag(dcx, level); let (message, _) = diag.messages.first().expect("`LlvmError` with no message"); let message = dcx.eagerly_translate_to_string(message.clone(), diag.args.iter()); Diag::new(dcx, level, fluent::codegen_llvm_parse_target_machine_config) @@ -119,8 +119,8 @@ pub(crate) struct TargetFeatureDisableOrEnable<'a> { #[help(codegen_llvm_missing_features)] pub(crate) struct MissingFeatures; -impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> { - fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> { +impl<G: EmissionGuarantee> Diagnostic<'_, G> for TargetFeatureDisableOrEnable<'_> { + fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> { let mut diag = Diag::new(dcx, level, fluent::codegen_llvm_target_feature_disable_or_enable); if let Some(span) = self.span { diag.span(span); @@ -179,8 +179,8 @@ pub enum LlvmError<'a> { pub(crate) struct WithLlvmError<'a>(pub LlvmError<'a>, pub String); -impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for WithLlvmError<'_> { - fn into_diagnostic(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> { +impl<G: EmissionGuarantee> Diagnostic<'_, G> for WithLlvmError<'_> { + fn into_diag(self, dcx: &'_ DiagCtxt, level: Level) -> Diag<'_, G> { use LlvmError::*; let msg_with_llvm_err = match &self.0 { WriteOutput { .. } => fluent::codegen_llvm_write_output_with_llvm_err, @@ -198,7 +198,7 @@ impl<G: EmissionGuarantee> IntoDiagnostic<'_, G> for WithLlvmError<'_> { ParseBitcode => fluent::codegen_llvm_parse_bitcode_with_llvm_err, }; self.0 - .into_diagnostic(dcx, level) + .into_diag(dcx, level) .with_primary_message(msg_with_llvm_err) .with_arg("llvm_err", self.1) } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 467e02d55e3..e4ec7974e90 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -181,6 +181,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { simple_fn, &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), None, + Some(instance), ) } sym::likely => { @@ -539,7 +540,7 @@ fn catch_unwind_intrinsic<'ll>( ) { if bx.sess().panic_strategy() == PanicStrategy::Abort { let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void()); - bx.call(try_func_ty, None, None, try_func, &[data], None); + bx.call(try_func_ty, None, None, try_func, &[data], None, None); // Return 0 unconditionally from the intrinsic call; // we can never unwind. 
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 467e02d55e3..e4ec7974e90 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -181,6 +181,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     simple_fn,
                     &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                     None,
+                    Some(instance),
                 )
             }
             sym::likely => {
@@ -539,7 +540,7 @@ fn catch_unwind_intrinsic<'ll>(
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.call(try_func_ty, None, None, try_func, &[data], None);
+        bx.call(try_func_ty, None, None, try_func, &[data], None, None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
         let ret_align = bx.tcx().data_layout.i32_align.abi;
@@ -640,7 +641,7 @@ fn codegen_msvc_try<'ll>(
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let slot = bx.alloca(bx.type_ptr(), ptr_align);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
 
         bx.switch_to_block(normal);
         bx.ret(bx.const_i32(0));
@@ -684,7 +685,7 @@ fn codegen_msvc_try<'ll>(
         let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
         let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
         bx.catch_ret(&funclet, caught);
 
         // The flag value of 64 indicates a "catch-all".
@@ -692,7 +693,7 @@ fn codegen_msvc_try<'ll>(
         let flags = bx.const_i32(64);
         let null = bx.const_null(bx.type_ptr());
         let funclet = bx.catch_pad(cs, &[null, flags, null]);
-        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet));
+        bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet), None);
         bx.catch_ret(&funclet, caught);
 
         bx.switch_to_block(caught);
@@ -701,7 +702,7 @@ fn codegen_msvc_try<'ll>(
 
     // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -750,7 +751,7 @@ fn codegen_wasm_try<'ll>(
         //      }
         //
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None, None);
 
         bx.switch_to_block(normal);
         bx.ret(bx.const_i32(0));
@@ -766,7 +767,7 @@ fn codegen_wasm_try<'ll>(
         let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);
 
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet), None);
         bx.catch_ret(&funclet, caught);
 
         bx.switch_to_block(caught);
@@ -775,7 +776,7 @@ fn codegen_wasm_try<'ll>(
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
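
Note: every bx.call/bx.invoke site in this file gains one trailing argument. Judging from the
first hunk above, which passes Some(instance) for simple intrinsics, the new parameter carries an
optional callee Instance; the hand-built EH shims below have none to supply and pass None. A
sketch of the mechanical rewrite, assuming that reading:

    // before
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
    // after: one extra trailing Option, None when no callee Instance is known
    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
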
@@ -818,7 +819,7 @@ fn codegen_gnu_try<'ll>(
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -836,13 +837,13 @@ fn codegen_gnu_try<'ll>(
         bx.add_clause(vals, tydesc);
         let ptr = bx.extract_value(vals, 0);
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None);
+        bx.call(catch_ty, None, None, catch_func, &[data, ptr], None, None);
         bx.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -882,7 +883,7 @@ fn codegen_emcc_try<'ll>(
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
         let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
-        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);
+        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None, None);
 
         bx.switch_to_block(then);
         bx.ret(bx.const_i32(0));
@@ -920,13 +921,13 @@ fn codegen_emcc_try<'ll>(
         bx.store(is_rust_panic, catch_data_1, i8_align);
 
         let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
-        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None);
+        bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None, None);
         bx.ret(bx.const_i32(1));
     });
 
     // Note that no invoke is used here because by definition this function
     // can't panic (that's what it's catching).
-    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None);
+    let ret = bx.call(llty, None, None, llfn, &[try_func, data, catch_func], None, None);
     let i32_align = bx.tcx().data_layout.i32_align.abi;
     bx.store(ret, dest, i32_align);
 }
@@ -1129,7 +1130,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     if name == sym::simd_shuffle_generic {
         let idx = fn_args[2]
             .expect_const()
-            .eval(tcx, ty::ParamEnv::reveal_all(), Some(span))
+            .eval(tcx, ty::ParamEnv::reveal_all(), span)
             .unwrap()
             .unwrap_branch();
         let n = idx.len() as u64;
@@ -1439,6 +1440,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
             None,
+            None,
         );
         Ok(c)
     }
@@ -1483,7 +1485,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
             ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
-            ty::RawPtr(_) => format!("v{}p0", vec_len),
+            ty::RawPtr(_, _) => format!("v{}p0", vec_len),
             _ => unreachable!(),
         }
     }
@@ -1493,7 +1495,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             ty::Int(v) => cx.type_int_from_ty(v),
             ty::Uint(v) => cx.type_uint_from_ty(v),
             ty::Float(v) => cx.type_float_from_ty(v),
-            ty::RawPtr(_) => cx.type_ptr(),
+            ty::RawPtr(_, _) => cx.type_ptr(),
             _ => unreachable!(),
         };
         cx.type_vector(elem_ty, vec_len)
@@ -1548,8 +1550,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         require!(
             matches!(
-                element_ty1.kind(),
-                ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+                *element_ty1.kind(),
+                ty::RawPtr(p_ty, _) if p_ty == in_elem && p_ty.kind() == element_ty0.kind()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1607,6 +1609,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[1].immediate(), alignment, mask, args[0].immediate()],
             None,
+            None,
         );
         return Ok(v);
     }
@@ -1654,8 +1657,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         require!(
             matches!(
-                pointer_ty.kind(),
-                ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind()
+                *pointer_ty.kind(),
+                ty::RawPtr(p_ty, _) if p_ty == values_elem && p_ty.kind() == values_elem.kind()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1706,6 +1709,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[1].immediate(), alignment, mask, args[2].immediate()],
             None,
+            None,
         );
         return Ok(v);
     }
@@ -1746,8 +1750,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         // The second argument must be a mutable pointer type matching the element type
         require!(
             matches!(
-                pointer_ty.kind(),
-                ty::RawPtr(p) if p.ty == values_elem && p.ty.kind() == values_elem.kind() && p.mutbl.is_mut()
+                *pointer_ty.kind(),
+                ty::RawPtr(p_ty, p_mutbl) if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1799,6 +1803,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[2].immediate(), args[1].immediate(), alignment, mask],
             None,
+            None,
         );
         return Ok(v);
     }
@@ -1843,9 +1848,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         require!(
             matches!(
-                element_ty1.kind(),
-                ty::RawPtr(p)
-                    if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+                *element_ty1.kind(),
+                ty::RawPtr(p_ty, p_mutbl)
+                    if p_ty == in_elem && p_mutbl.is_mut() && p_ty.kind() == element_ty0.kind()
             ),
             InvalidMonomorphization::ExpectedElementType {
                 span,
@@ -1904,6 +1909,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             f,
             &[args[0].immediate(), args[1].immediate(), alignment, mask],
             None,
+            None,
         );
         return Ok(v);
     }
@@ -2074,8 +2080,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         );
 
         match in_elem.kind() {
-            ty::RawPtr(p) => {
-                let metadata = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+            ty::RawPtr(p_ty, _) => {
+                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
                 require!(
@@ -2088,8 +2094,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         }
         match out_elem.kind() {
-            ty::RawPtr(p) => {
-                let metadata = p.ty.ptr_metadata_ty(bx.tcx, |ty| {
+            ty::RawPtr(p_ty, _) => {
+                let metadata = p_ty.ptr_metadata_ty(bx.tcx, |ty| {
                     bx.tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), ty)
                 });
                 require!(
@@ -2120,7 +2126,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         );
 
         match in_elem.kind() {
-            ty::RawPtr(_) => {}
+            ty::RawPtr(_, _) => {}
             _ => {
                 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: in_elem })
             }
@@ -2133,7 +2139,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         return Ok(bx.ptrtoint(args[0].immediate(), llret_ty));
     }
 
-    if name == sym::simd_from_exposed_addr {
+    if name == sym::simd_with_exposed_provenance {
         let (out_len, out_elem) = require_simd!(ret_ty, SimdReturn);
         require!(
             in_len == out_len,
@@ -2152,7 +2158,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             _ => return_error!(InvalidMonomorphization::ExpectedUsize { span, name, ty: in_elem }),
         }
         match out_elem.kind() {
-            ty::RawPtr(_) => {}
+            ty::RawPtr(_, _) => {}
             _ => {
                 return_error!(InvalidMonomorphization::ExpectedPointer { span, name, ty: out_elem })
             }
@@ -2352,11 +2358,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 f,
                 &[args[0].immediate(), bx.const_int(bx.type_i1(), 0)],
                 None,
+                None,
             ))
         } else {
             let fn_ty = bx.type_func(&[vec_ty], vec_ty);
             let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None))
+            Ok(bx.call(fn_ty, None, None, f, &[args[0].immediate()], None, None))
         };
     }
@@ -2409,7 +2416,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
 
         let fn_ty = bx.type_func(&[vec_ty, vec_ty], vec_ty);
         let f = bx.declare_cfn(llvm_intrinsic, llvm::UnnamedAddr::No, fn_ty);
-        let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None);
+        let v = bx.call(fn_ty, None, None, f, &[lhs, rhs], None, None);
         return Ok(v);
     }
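
Note: the recurring ty::RawPtr(p) -> ty::RawPtr(p_ty, _) edits track a change in TyKind: the
raw-pointer variant now carries the pointee type and mutability as two inline fields instead of a
single TypeAndMut struct, so patterns bind p_ty and p_mutbl directly (and match by value, hence
the new `*` on scrutinees like *element_ty1.kind()). A runnable toy model of the new match shape,
with local enums standing in for the rustc types:

    enum Mutability { Not, Mut }

    enum TyKind {
        // old: RawPtr(TypeAndMut { ty, mutbl }); new: both fields inline
        RawPtr(&'static str, Mutability),
    }

    fn is_mut_ptr_to(kind: &TyKind, elem: &str) -> bool {
        // Both payloads bound directly in the pattern, as in the hunks above.
        matches!(kind, TyKind::RawPtr(p_ty, Mutability::Mut) if *p_ty == elem)
    }

    fn main() {
        assert!(is_mut_ptr_to(&TyKind::RawPtr("i32", Mutability::Mut), "i32"));
        assert!(!is_mut_ptr_to(&TyKind::RawPtr("i32", Mutability::Not), "i32"));
    }
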
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 58e98037067..284bc74d5c4 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -936,10 +936,16 @@ extern "C" {
     pub fn LLVMConstReal(RealTy: &Type, N: f64) -> &Value;
 
     // Operations on composite constants
-    pub fn LLVMConstStringInContext(
+    pub fn LLVMConstArray2<'a>(
+        ElementTy: &'a Type,
+        ConstantVals: *const &'a Value,
+        Length: u64,
+    ) -> &'a Value;
+    pub fn LLVMArrayType2(ElementType: &Type, ElementCount: u64) -> &Type;
+    pub fn LLVMConstStringInContext2(
         C: &Context,
         Str: *const c_char,
-        Length: c_uint,
+        Length: size_t,
         DontNullTerminate: Bool,
     ) -> &Value;
     pub fn LLVMConstStructInContext<'a>(
@@ -948,14 +954,6 @@ extern "C" {
         Count: c_uint,
         Packed: Bool,
     ) -> &'a Value;
-
-    // FIXME: replace with LLVMConstArray2 when bumped minimal version to llvm-17
-    // https://github.com/llvm/llvm-project/commit/35276f16e5a2cae0dfb49c0fbf874d4d2f177acc
-    pub fn LLVMConstArray<'a>(
-        ElementTy: &'a Type,
-        ConstantVals: *const &'a Value,
-        Length: c_uint,
-    ) -> &'a Value;
     pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
 
     // Constant expressions
@@ -1521,7 +1519,7 @@ extern "C" {
 
 #[link(name = "llvm-wrapper", kind = "static")]
 extern "C" {
-    pub fn LLVMRustInstallFatalErrorHandler();
+    pub fn LLVMRustInstallErrorHandlers();
     pub fn LLVMRustDisableSystemDialogsOnCrash();
 
     // Create and destroy contexts.
@@ -1530,9 +1528,6 @@ extern "C" {
     /// See llvm::LLVMTypeKind::getTypeID.
     pub fn LLVMRustGetTypeKind(Ty: &Type) -> TypeKind;
 
-    // Operations on array, pointer, and vector types (sequence types)
-    pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
-
     // Operations on all values
     pub fn LLVMRustGlobalAddMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata);
     pub fn LLVMRustIsNonGVFunctionPointerTy(Val: &Value) -> bool;
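
Note: the bindings move off the 32-bit-length C APIs: LLVMConstArray (c_uint length) and the
llvm-wrapper's LLVMRustArrayType give way to LLVMConstArray2 and LLVMArrayType2, and
LLVMConstStringInContext to LLVMConstStringInContext2 with a size_t length, all available once the
minimum LLVM is 17 (see the FIXME being deleted above). A sketch of a caller of the new array
binding (hypothetical helper; the declaration is exactly the one added above):

    // Build a constant [vals.len() x elem_ty] array. The u64 length is what
    // the old c_uint-based LLVMConstArray could not express.
    unsafe fn const_array<'ll>(elem_ty: &'ll Type, vals: &[&'ll Value]) -> &'ll Value {
        llvm::LLVMConstArray2(elem_ty, vals.as_ptr(), vals.len() as u64)
    }
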
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index 4f5cc575da6..6ab1eea9597 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -11,6 +11,7 @@ pub use self::RealPredicate::*;
 use libc::c_uint;
 use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_llvm::RustString;
+use rustc_target::abi::Align;
 use std::cell::RefCell;
 use std::ffi::{CStr, CString};
 use std::str::FromStr;
@@ -229,9 +230,9 @@ pub fn set_visibility(llglobal: &Value, visibility: Visibility) {
     }
 }
 
-pub fn set_alignment(llglobal: &Value, bytes: usize) {
+pub fn set_alignment(llglobal: &Value, align: Align) {
     unsafe {
-        ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
+        ffi::LLVMSetAlignment(llglobal, align.bytes() as c_uint);
     }
 }
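
Note: set_alignment now takes rustc_target::abi::Align rather than a raw usize byte count, so a
non-power-of-two alignment can no longer reach LLVM. A sketch of the caller-side difference
(hypothetical call site; Align::from_bytes is the real constructor and rejects invalid values):

    use rustc_target::abi::Align;

    // before: llvm::set_alignment(llglobal, 3) would compile and misbehave
    // after: the alignment arrives pre-validated as a power of two
    let align = Align::from_bytes(16).expect("power of two");
    llvm::set_alignment(llglobal, align);
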
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index 78d47f36f91..c9e62e504ae 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -5,6 +5,7 @@ use crate::errors::{
 };
 use crate::llvm;
 use libc::c_int;
+use rustc_codegen_ssa::base::wants_wasm_eh;
 use rustc_codegen_ssa::traits::PrintBackendInfo;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::small_c_str::SmallCStr;
@@ -48,7 +49,7 @@ unsafe fn configure_llvm(sess: &Session) {
     let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
     let mut llvm_args = Vec::with_capacity(n_args + 1);
 
-    llvm::LLVMRustInstallFatalErrorHandler();
+    llvm::LLVMRustInstallErrorHandlers();
     // On Windows, an LLVM assertion will open an Abort/Retry/Ignore dialog
     // box for the purpose of launching a debugger. However, on CI this will
     // cause it to hang until it times out, which can take several hours.
@@ -98,6 +99,10 @@ unsafe fn configure_llvm(sess: &Session) {
         }
     }
 
+    if wants_wasm_eh(sess) {
+        add("-wasm-enable-eh", false);
+    }
+
     if sess.target.os == "emscripten" && sess.panic_strategy() == PanicStrategy::Unwind {
         add("-enable-emscripten-cxx-exceptions", false);
     }
@@ -523,6 +528,10 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str
             .map(String::from),
     );
 
+    if wants_wasm_eh(sess) && sess.panic_strategy() == PanicStrategy::Unwind {
+        features.push("+exception-handling".into());
+    }
+
     // -Ctarget-features
     let supported_features = sess.target.supported_target_features();
     let mut featsmap = FxHashMap::default();
diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs
index f7630719368..29100a64171 100644
--- a/compiler/rustc_codegen_llvm/src/mono_item.rs
+++ b/compiler/rustc_codegen_llvm/src/mono_item.rs
@@ -5,7 +5,9 @@ use crate::errors::SymbolAlreadyDefined;
 use crate::llvm;
 use crate::type_of::LayoutLlvmExt;
 use rustc_codegen_ssa::traits::*;
+use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_middle::bug;
 use rustc_middle::mir::mono::{Linkage, Visibility};
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf};
 use rustc_middle::ty::{self, Instance, TypeVisitableExt};
@@ -21,7 +23,14 @@ impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> {
         symbol_name: &str,
     ) {
         let instance = Instance::mono(self.tcx, def_id);
-        let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
+        let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
+        // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
+        // the llvm type from the actual evaluated initializer.
+        let ty = if nested {
+            self.tcx.types.unit
+        } else {
+            instance.ty(self.tcx, ty::ParamEnv::reveal_all())
+        };
         let llty = self.layout_of(ty).llvm_type(self);
 
         let g = self.define_global(symbol_name, llty).unwrap_or_else(|| {
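
Note: for context on the `nested` branch above, a nested static is the compiler-created
allocation behind a static's initializer, e.g. the `&42` below, assuming the usual meaning of
DefKind::Static { nested: true }. It has no user-written type of its own, which is why predefine
picks the dummy unit type and defers to codegen_static to recover the LLVM type from the
evaluated initializer:

    // The outer item is an ordinary static; the inner `&42` allocation is
    // materialized by the compiler as an anonymous (nested) static.
    static FOO: &u32 = &42;

    fn main() {
        assert_eq!(*FOO, 42);
    }
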
diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs
index 07a4861ed73..af1bbda4d08 100644
--- a/compiler/rustc_codegen_llvm/src/type_.rs
+++ b/compiler/rustc_codegen_llvm/src/type_.rs
@@ -233,7 +233,7 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
-        unsafe { llvm::LLVMRustArrayType(ty, len) }
+        unsafe { llvm::LLVMArrayType2(ty, len) }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs
index 587adefe1d2..d10a083765b 100644
--- a/compiler/rustc_codegen_llvm/src/type_of.rs
+++ b/compiler/rustc_codegen_llvm/src/type_of.rs
@@ -1,5 +1,4 @@
 use crate::common::*;
-use crate::context::TypeLowering;
 use crate::type_::Type;
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
@@ -10,7 +9,6 @@ use rustc_target::abi::HasDataLayout;
 use rustc_target::abi::{Abi, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F128, F16, F32, F64};
 use rustc_target::abi::{Scalar, Size, Variants};
-use smallvec::{smallvec, SmallVec};
 
 use std::fmt::Write;
 
@@ -18,7 +16,6 @@ fn uncached_llvm_type<'a, 'tcx>(
     cx: &CodegenCx<'a, 'tcx>,
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
-    field_remapping: &mut Option<SmallVec<[u32; 4]>>,
 ) -> &'a Type {
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
@@ -71,8 +68,7 @@ fn uncached_llvm_type<'a, 'tcx>(
             FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
             FieldsShape::Arbitrary { .. } => match name {
                 None => {
-                    let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
-                    *field_remapping = new_field_remapping;
+                    let (llfields, packed) = struct_llfields(cx, layout);
                     cx.type_struct(&llfields, packed)
                 }
                 Some(ref name) => {
@@ -87,7 +83,7 @@ fn uncached_llvm_type<'a, 'tcx>(
 fn struct_llfields<'a, 'tcx>(
     cx: &CodegenCx<'a, 'tcx>,
     layout: TyAndLayout<'tcx>,
-) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) {
+) -> (Vec<&'a Type>, bool) {
     debug!("struct_llfields: {:#?}", layout);
     let field_count = layout.fields.count();
 
@@ -95,7 +91,6 @@ fn struct_llfields<'a, 'tcx>(
     let mut offset = Size::ZERO;
     let mut prev_effective_align = layout.align.abi;
     let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
-    let mut field_remapping = smallvec![0; field_count];
     for i in layout.fields.index_by_increasing_offset() {
         let target_offset = layout.fields.offset(i as usize);
         let field = layout.field(cx, i);
@@ -120,12 +115,10 @@ fn struct_llfields<'a, 'tcx>(
             result.push(cx.type_padding_filler(padding, padding_align));
             debug!("    padding before: {:?}", padding);
         }
-        field_remapping[i] = result.len() as u32;
         result.push(field.llvm_type(cx));
         offset = target_offset + field.size;
         prev_effective_align = effective_field_align;
     }
-    let padding_used = result.len() > field_count;
     if layout.is_sized() && field_count > 0 {
         if offset > layout.size {
             bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
@@ -143,8 +136,7 @@ fn struct_llfields<'a, 'tcx>(
     } else {
         debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
     }
-    let field_remapping = padding_used.then_some(field_remapping);
-    (result, packed, field_remapping)
+    (result, packed)
 }
 
 impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
@@ -224,7 +216,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             _ => None,
         };
         if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
-            return llty.lltype;
+            return llty;
         }
 
         debug!("llvm_type({:#?})", self);
@@ -236,7 +228,6 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         let normal_ty = cx.tcx.erase_regions(self.ty);
 
         let mut defer = None;
-        let mut field_remapping = None;
         let llty = if self.ty != normal_ty {
            let mut layout = cx.layout_of(normal_ty);
            if let Some(v) = variant_index {
@@ -244,22 +235,15 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             }
             layout.llvm_type(cx)
         } else {
-            uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping)
+            uncached_llvm_type(cx, *self, &mut defer)
         };
         debug!("--> mapped {:#?} to llty={:?}", self, llty);
 
-        cx.type_lowering
-            .borrow_mut()
-            .insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping });
+        cx.type_lowering.borrow_mut().insert((self.ty, variant_index), llty);
 
         if let Some((llty, layout)) = defer {
-            let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
+            let (llfields, packed) = struct_llfields(cx, layout);
             cx.set_struct_body(llty, &llfields, packed);
-            cx.type_lowering
-                .borrow_mut()
-                .get_mut(&(self.ty, variant_index))
-                .unwrap()
-                .field_remapping = new_field_remapping;
         }
         llty
     }
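
Note: the slimmed-down struct_llfields keeps its core algorithm: walk fields in increasing-offset
order, emit a padding filler whenever the next field's target offset lies past the current offset,
and append the field's LLVM type; only the field_remapping bookkeeping is gone. A runnable sketch
of that padding loop with plain integers standing in for Size (a simplification; the real code
also tracks prev_effective_align and a trailing filler up to the struct size):

    // Each field is (target_offset, size), pre-sorted by increasing offset.
    fn llfields(fields: &[(u64, u64)]) -> Vec<String> {
        let mut result = Vec::new();
        let mut offset = 0;
        for &(target_offset, size) in fields {
            assert!(target_offset >= offset, "fields must not overlap");
            let padding = target_offset - offset;
            if padding != 0 {
                // stands in for cx.type_padding_filler(padding, padding_align)
                result.push(format!("[{padding} x i8]"));
            }
            result.push(format!("field({size} bytes)"));
            offset = target_offset + size;
        }
        result
    }

    fn main() {
        // u8 at offset 0, u32 at offset 4 -> a 3-byte filler in between
        assert_eq!(llfields(&[(0, 1), (4, 4)]), ["field(1 bytes)", "[3 x i8]", "field(4 bytes)"]);
    }
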
