Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
43 files changed, 1754 insertions, 2076 deletions
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index d034f9b5256..26718792f5f 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -1,32 +1,31 @@ -use crate::attributes; -use crate::builder::Builder; -use crate::context::CodegenCx; -use crate::llvm::{self, Attribute, AttributePlace}; -use crate::llvm_util; -use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; -use crate::value::Value; +use std::cmp; +use libc::c_uint; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::MemFlags; -use rustc_middle::bug; use rustc_middle::ty::layout::LayoutOf; -pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; +pub(crate) use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; use rustc_middle::ty::Ty; +use rustc_middle::{bug, ty}; use rustc_session::config; -pub use rustc_target::abi::call::*; +pub(crate) use rustc_target::abi::call::*; use rustc_target::abi::{self, HasDataLayout, Int, Size}; -pub use rustc_target::spec::abi::Abi; +pub(crate) use rustc_target::spec::abi::Abi; use rustc_target::spec::SanitizerSet; - -use libc::c_uint; use smallvec::SmallVec; -use std::cmp; +use crate::attributes::llfn_attrs_from_instance; +use crate::builder::Builder; +use crate::context::CodegenCx; +use crate::llvm::{self, Attribute, AttributePlace}; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; +use crate::{attributes, llvm_util}; -pub trait ArgAttributesExt { +trait ArgAttributesExt { fn apply_attrs_to_llfn(&self, idx: AttributePlace, cx: &CodegenCx<'_, '_>, llfn: &Value); fn apply_attrs_to_callsite( &self, @@ -112,7 +111,7 @@ impl ArgAttributesExt for ArgAttributes { } } -pub trait LlvmType { +pub(crate) trait LlvmType { fn llvm_type<'ll>(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type; } @@ -121,8 +120,10 @@ impl LlvmType for Reg { match self.kind { RegKind::Integer => cx.type_ix(self.size.bits()), RegKind::Float => match self.size.bits() { + 16 => cx.type_f16(), 32 => cx.type_f32(), 64 => cx.type_f64(), + 128 => cx.type_f128(), _ => bug!("unsupported float: {:?}", self), }, RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()), @@ -170,7 +171,7 @@ impl LlvmType for CastTarget { } } -pub trait ArgAbiExt<'ll, 'tcx> { +trait ArgAbiExt<'ll, 'tcx> { fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; fn store( &self, @@ -284,7 +285,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> { } } -impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> { +impl<'ll, 'tcx> ArgAbiBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn store_fn_arg( &mut self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>, @@ -306,11 +307,20 @@ impl<'ll, 'tcx> ArgAbiMethods<'tcx> for Builder<'_, 'll, 'tcx> { } } -pub trait FnAbiLlvmExt<'ll, 'tcx> { +pub(crate) trait FnAbiLlvmExt<'ll, 'tcx> { fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; - fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value); + + /// Apply attributes to a function declaration/definition. + fn apply_attrs_llfn( + &self, + cx: &CodegenCx<'ll, 'tcx>, + llfn: &'ll Value, + instance: Option<ty::Instance<'tcx>>, + ); + + /// Apply attributes to a function call. 
fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value); } @@ -396,7 +406,12 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { self.conv.into() } - fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) { + fn apply_attrs_llfn( + &self, + cx: &CodegenCx<'ll, 'tcx>, + llfn: &'ll Value, + instance: Option<ty::Instance<'tcx>>, + ) { let mut func_attrs = SmallVec::<[_; 3]>::new(); if self.ret.layout.abi.is_uninhabited() { func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx)); @@ -415,9 +430,32 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { i += 1; i - 1 }; + + let apply_range_attr = |idx: AttributePlace, scalar: rustc_target::abi::Scalar| { + if cx.sess().opts.optimize != config::OptLevel::No + && llvm_util::get_version() >= (19, 0, 0) + && matches!(scalar.primitive(), Int(..)) + // If the value is a boolean, the range is 0..2 and that ultimately + // become 0..0 when the type becomes i1, which would be rejected + // by the LLVM verifier. + && !scalar.is_bool() + // LLVM also rejects full range. + && !scalar.is_always_valid(cx) + { + attributes::apply_to_llfn( + llfn, + idx, + &[llvm::CreateRangeAttr(cx.llcx, scalar.size(cx), scalar.valid_range(cx))], + ); + } + }; + match &self.ret.mode { PassMode::Direct(attrs) => { attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn); + if let abi::Abi::Scalar(scalar) = self.ret.layout.abi { + apply_range_attr(llvm::AttributePlace::ReturnValue, scalar); + } } PassMode::Indirect { attrs, meta_attrs: _, on_stack } => { assert!(!on_stack); @@ -427,9 +465,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { cx.type_array(cx.type_i8(), self.ret.layout.size.bytes()), ); attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[sret]); - if cx.sess().opts.optimize != config::OptLevel::No - && llvm_util::get_version() >= (18, 0, 0) - { + if cx.sess().opts.optimize != config::OptLevel::No { attributes::apply_to_llfn( llfn, llvm::AttributePlace::Argument(i), @@ -456,8 +492,13 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { ); attributes::apply_to_llfn(llfn, llvm::AttributePlace::Argument(i), &[byval]); } - PassMode::Direct(attrs) - | PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => { + PassMode::Direct(attrs) => { + let i = apply(attrs); + if let abi::Abi::Scalar(scalar) = arg.layout.abi { + apply_range_attr(llvm::AttributePlace::Argument(i), scalar); + } + } + PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => { apply(attrs); } PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => { @@ -466,8 +507,12 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { apply(meta_attrs); } PassMode::Pair(a, b) => { - apply(a); - apply(b); + let i = apply(a); + let ii = apply(b); + if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi { + apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a); + apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b); + } } PassMode::Cast { cast, pad_i32 } => { if *pad_i32 { @@ -477,6 +522,11 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } } } + + // If the declaration has an associated instance, compute extra attributes based on that. 
+ if let Some(instance) = instance { + llfn_attrs_from_instance(cx, llfn, instance); + } } fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) { @@ -517,15 +567,18 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } _ => {} } - if let abi::Abi::Scalar(scalar) = self.ret.layout.abi { - // If the value is a boolean, the range is 0..2 and that ultimately - // become 0..0 when the type becomes i1, which would be rejected - // by the LLVM verifier. - if let Int(..) = scalar.primitive() { - if !scalar.is_bool() && !scalar.is_always_valid(bx) { - bx.range_metadata(callsite, scalar.valid_range(bx)); - } - } + if bx.cx.sess().opts.optimize != config::OptLevel::No + && llvm_util::get_version() < (19, 0, 0) + && let abi::Abi::Scalar(scalar) = self.ret.layout.abi + && matches!(scalar.primitive(), Int(..)) + // If the value is a boolean, the range is 0..2 and that ultimately + // become 0..0 when the type becomes i1, which would be rejected + // by the LLVM verifier. + && !scalar.is_bool() + // LLVM also rejects full range. + && !scalar.is_always_valid(bx) + { + bx.range_metadata(callsite, scalar.valid_range(bx)); } for arg in self.args.iter() { match &arg.mode { diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 5969d9b9144..b4f3784a31a 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -1,4 +1,3 @@ -use crate::attributes; use libc::c_uint; use rustc_ast::expand::allocator::{ alloc_error_handler_name, default_fn_name, global_fn_name, AllocatorKind, AllocatorTy, @@ -8,9 +7,8 @@ use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::{DebugInfo, OomStrategy}; -use crate::debuginfo; use crate::llvm::{self, Context, False, Module, True, Type}; -use crate::ModuleLlvm; +use crate::{attributes, debuginfo, ModuleLlvm}; pub(crate) unsafe fn codegen( tcx: TyCtxt<'_>, @@ -151,7 +149,7 @@ fn create_wrapper_function( } llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden); - let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr().cast()); + let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr()); let llbuilder = llvm::LLVMCreateBuilderInContext(llcx); llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb); diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 597ebd97365..430ba735243 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -1,25 +1,26 @@ -use crate::attributes; -use crate::builder::Builder; -use crate::common::Funclet; -use crate::context::CodegenCx; -use crate::llvm; -use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; -use crate::value::Value; +use std::assert_matches::assert_matches; +use libc::{c_char, c_uint}; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_codegen_ssa::mir::operand::OperandValue; use rustc_codegen_ssa::traits::*; use rustc_data_structures::fx::FxHashMap; use rustc_middle::ty::layout::TyAndLayout; -use rustc_middle::{bug, span_bug, ty::Instance}; +use rustc_middle::ty::Instance; +use rustc_middle::{bug, span_bug}; use rustc_span::{sym, Pos, Span, Symbol}; use rustc_target::abi::*; use rustc_target::asm::*; +use smallvec::SmallVec; use tracing::debug; -use libc::{c_char, c_uint}; -use smallvec::SmallVec; +use crate::builder::Builder; +use crate::common::Funclet; +use crate::context::CodegenCx; +use crate::type_::Type; +use 
crate::type_of::LayoutLlvmExt; +use crate::value::Value; +use crate::{attributes, llvm}; impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn codegen_inline_asm( @@ -90,7 +91,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { // if the target feature needed by the register class is // disabled. This is necessary otherwise LLVM will try // to actually allocate a register for the dummy output. - assert!(matches!(reg, InlineAsmRegOrRegClass::Reg(_))); + assert_matches!(reg, InlineAsmRegOrRegClass::Reg(_)); clobbers.push(format!("~{}", reg_to_llvm(reg, None))); continue; } else { @@ -355,7 +356,7 @@ impl<'ll, 'tcx> AsmBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { } } -impl<'tcx> AsmMethods<'tcx> for CodegenCx<'_, 'tcx> { +impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { fn codegen_global_asm( &self, template: &[InlineAsmTemplatePiece], @@ -519,24 +520,16 @@ pub(crate) fn inline_asm_call<'ll>( /// If the register is an xmm/ymm/zmm register then return its index. fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> { + use X86InlineAsmReg::*; match reg { - InlineAsmReg::X86(reg) - if reg as u32 >= X86InlineAsmReg::xmm0 as u32 - && reg as u32 <= X86InlineAsmReg::xmm15 as u32 => - { - Some(reg as u32 - X86InlineAsmReg::xmm0 as u32) + InlineAsmReg::X86(reg) if reg as u32 >= xmm0 as u32 && reg as u32 <= xmm15 as u32 => { + Some(reg as u32 - xmm0 as u32) } - InlineAsmReg::X86(reg) - if reg as u32 >= X86InlineAsmReg::ymm0 as u32 - && reg as u32 <= X86InlineAsmReg::ymm15 as u32 => - { - Some(reg as u32 - X86InlineAsmReg::ymm0 as u32) + InlineAsmReg::X86(reg) if reg as u32 >= ymm0 as u32 && reg as u32 <= ymm15 as u32 => { + Some(reg as u32 - ymm0 as u32) } - InlineAsmReg::X86(reg) - if reg as u32 >= X86InlineAsmReg::zmm0 as u32 - && reg as u32 <= X86InlineAsmReg::zmm31 as u32 => - { - Some(reg as u32 - X86InlineAsmReg::zmm0 as u32) + InlineAsmReg::X86(reg) if reg as u32 >= zmm0 as u32 && reg as u32 <= zmm31 as u32 => { + Some(reg as u32 - zmm0 as u32) } _ => None, } @@ -544,50 +537,56 @@ fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> { /// If the register is an AArch64 integer register then return its index. 
fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> { - match reg { - InlineAsmReg::AArch64(AArch64InlineAsmReg::x0) => Some(0), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x1) => Some(1), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x2) => Some(2), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x3) => Some(3), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x4) => Some(4), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x5) => Some(5), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x6) => Some(6), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x7) => Some(7), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x8) => Some(8), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x9) => Some(9), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x10) => Some(10), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x11) => Some(11), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x12) => Some(12), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x13) => Some(13), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x14) => Some(14), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x15) => Some(15), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x16) => Some(16), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x17) => Some(17), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x18) => Some(18), - // x19 is reserved - InlineAsmReg::AArch64(AArch64InlineAsmReg::x20) => Some(20), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x21) => Some(21), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x22) => Some(22), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x23) => Some(23), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x24) => Some(24), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x25) => Some(25), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x26) => Some(26), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x27) => Some(27), - InlineAsmReg::AArch64(AArch64InlineAsmReg::x28) => Some(28), - // x29 is reserved - InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) => Some(30), - _ => None, - } + use AArch64InlineAsmReg::*; + // Unlike `a64_vreg_index`, we can't subtract `x0` to get the u32 because + // `x19` and `x29` are missing and the integer constants for the + // `x0`..`x30` enum variants don't all match the register number. E.g. the + // integer constant for `x18` is 18, but the constant for `x20` is 19. + Some(match reg { + InlineAsmReg::AArch64(r) => match r { + x0 => 0, + x1 => 1, + x2 => 2, + x3 => 3, + x4 => 4, + x5 => 5, + x6 => 6, + x7 => 7, + x8 => 8, + x9 => 9, + x10 => 10, + x11 => 11, + x12 => 12, + x13 => 13, + x14 => 14, + x15 => 15, + x16 => 16, + x17 => 17, + x18 => 18, + // x19 is reserved + x20 => 20, + x21 => 21, + x22 => 22, + x23 => 23, + x24 => 24, + x25 => 25, + x26 => 26, + x27 => 27, + x28 => 28, + // x29 is reserved + x30 => 30, + _ => return None, + }, + _ => return None, + }) } /// If the register is an AArch64 vector register then return its index. fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> { + use AArch64InlineAsmReg::*; match reg { - InlineAsmReg::AArch64(reg) - if reg as u32 >= AArch64InlineAsmReg::v0 as u32 - && reg as u32 <= AArch64InlineAsmReg::v31 as u32 => - { - Some(reg as u32 - AArch64InlineAsmReg::v0 as u32) + InlineAsmReg::AArch64(reg) if reg as u32 >= v0 as u32 && reg as u32 <= v31 as u32 => { + Some(reg as u32 - v0 as u32) } _ => None, } @@ -595,6 +594,7 @@ fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> { /// Converts a register class to an LLVM constraint code. 
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String { + use InlineAsmRegClass::*; match reg { // For vector registers LLVM wants the register name to match the type size. InlineAsmRegOrRegClass::Reg(reg) => { @@ -651,75 +651,66 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> // The constraints can be retrieved from // https://llvm.org/docs/LangRef.html#supported-constraint-code-list InlineAsmRegOrRegClass::RegClass(reg) => match reg { - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r", - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w", - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x", - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { - unreachable!("clobber-only") - } - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t", - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x", - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w", - InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f", - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f", - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h", - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r", - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l", - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b", - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f", - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) - | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { + AArch64(AArch64InlineAsmRegClass::reg) => "r", + AArch64(AArch64InlineAsmRegClass::vreg) => "w", + AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x", + AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"), + Arm(ArmInlineAsmRegClass::reg) => "r", + Arm(ArmInlineAsmRegClass::sreg) + | Arm(ArmInlineAsmRegClass::dreg_low16) + | Arm(ArmInlineAsmRegClass::qreg_low8) => "t", + Arm(ArmInlineAsmRegClass::sreg_low16) + | Arm(ArmInlineAsmRegClass::dreg_low8) + | Arm(ArmInlineAsmRegClass::qreg_low4) => "x", + Arm(ArmInlineAsmRegClass::dreg) | Arm(ArmInlineAsmRegClass::qreg) => "w", + Hexagon(HexagonInlineAsmRegClass::reg) => "r", + LoongArch(LoongArchInlineAsmRegClass::reg) => "r", + LoongArch(LoongArchInlineAsmRegClass::freg) => "f", + Mips(MipsInlineAsmRegClass::reg) => "r", + Mips(MipsInlineAsmRegClass::freg) => "f", + Nvptx(NvptxInlineAsmRegClass::reg16) => "h", + Nvptx(NvptxInlineAsmRegClass::reg32) => "r", + Nvptx(NvptxInlineAsmRegClass::reg64) => "l", + PowerPC(PowerPCInlineAsmRegClass::reg) => "r", + PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b", + PowerPC(PowerPCInlineAsmRegClass::freg) => "f", + PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => { unreachable!("clobber-only") } - 
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f", - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { - unreachable!("clobber-only") - } - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r", - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q", - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q", - InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x", - InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v", - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk", - InlineAsmRegClass::X86( + RiscV(RiscVInlineAsmRegClass::reg) => "r", + RiscV(RiscVInlineAsmRegClass::freg) => "f", + RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"), + X86(X86InlineAsmRegClass::reg) => "r", + X86(X86InlineAsmRegClass::reg_abcd) => "Q", + X86(X86InlineAsmRegClass::reg_byte) => "q", + X86(X86InlineAsmRegClass::xmm_reg) | X86(X86InlineAsmRegClass::ymm_reg) => "x", + X86(X86InlineAsmRegClass::zmm_reg) => "v", + X86(X86InlineAsmRegClass::kreg) => "^Yk", + X86( X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::kreg0 | X86InlineAsmRegClass::tmm_reg, ) => unreachable!("clobber-only"), - InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r", - InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w", - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d", - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r", - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w", - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e", - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a", - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f", - InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r", - InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a", - InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d", - InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r", - InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f", - InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { - bug!("LLVM backend does not support SPIR-V") - } - InlineAsmRegClass::Err => unreachable!(), + Wasm(WasmInlineAsmRegClass::local) => "r", + Bpf(BpfInlineAsmRegClass::reg) => "r", + Bpf(BpfInlineAsmRegClass::wreg) => "w", + Avr(AvrInlineAsmRegClass::reg) => "r", + Avr(AvrInlineAsmRegClass::reg_upper) => "d", + Avr(AvrInlineAsmRegClass::reg_pair) => "r", + Avr(AvrInlineAsmRegClass::reg_iw) => "w", + Avr(AvrInlineAsmRegClass::reg_ptr) => "e", + S390x(S390xInlineAsmRegClass::reg) => "r", + S390x(S390xInlineAsmRegClass::reg_addr) => "a", + S390x(S390xInlineAsmRegClass::freg) => "f", + Msp430(Msp430InlineAsmRegClass::reg) => "r", + M68k(M68kInlineAsmRegClass::reg) => "r", + M68k(M68kInlineAsmRegClass::reg_addr) => "a", + M68k(M68kInlineAsmRegClass::reg_data) => "d", + CSKY(CSKYInlineAsmRegClass::reg) => "r", + CSKY(CSKYInlineAsmRegClass::freg) => "f", + SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"), + Err => unreachable!(), } .to_string(), } @@ -731,44 +722,41 @@ fn modifier_to_llvm( reg: InlineAsmRegClass, modifier: Option<char>, ) 
-> Option<char> { + use InlineAsmRegClass::*; // The modifiers can be retrieved from // https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers match reg { - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier, - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) - | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { - if modifier == Some('v') { None } else { modifier } - } - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { - unreachable!("clobber-only") + AArch64(AArch64InlineAsmRegClass::reg) => modifier, + AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => { + if modifier == Some('v') { + None + } else { + modifier + } } - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None, - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None, - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => { + AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"), + Arm(ArmInlineAsmRegClass::reg) => None, + Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => None, + Arm(ArmInlineAsmRegClass::dreg) + | Arm(ArmInlineAsmRegClass::dreg_low16) + | Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'), + Arm(ArmInlineAsmRegClass::qreg) + | Arm(ArmInlineAsmRegClass::qreg_low8) + | Arm(ArmInlineAsmRegClass::qreg_low4) => { if modifier.is_none() { Some('q') } else { modifier } } - InlineAsmRegClass::Hexagon(_) => None, - InlineAsmRegClass::LoongArch(_) => None, - InlineAsmRegClass::Mips(_) => None, - InlineAsmRegClass::Nvptx(_) => None, - InlineAsmRegClass::PowerPC(_) => None, - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) - | InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None, - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { - unreachable!("clobber-only") - } - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier { + Hexagon(_) => None, + LoongArch(_) => None, + Mips(_) => None, + Nvptx(_) => None, + PowerPC(_) => None, + RiscV(RiscVInlineAsmRegClass::reg) | RiscV(RiscVInlineAsmRegClass::freg) => None, + RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"), + X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => match modifier { None if arch == InlineAsmArch::X86_64 => Some('q'), None => Some('k'), Some('l') => Some('b'), @@ -778,10 +766,10 @@ fn modifier_to_llvm( Some('r') => Some('q'), _ => unreachable!(), }, - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None, - InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg) - | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg) - | InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) { + X86(X86InlineAsmRegClass::reg_byte) => None, + X86(reg @ X86InlineAsmRegClass::xmm_reg) + | X86(reg @ X86InlineAsmRegClass::ymm_reg) + | X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) { (X86InlineAsmRegClass::xmm_reg, None) => Some('x'), (X86InlineAsmRegClass::ymm_reg, None) => Some('t'), (X86InlineAsmRegClass::zmm_reg, None) => Some('g'), @@ -790,116 
+778,97 @@ fn modifier_to_llvm( (_, Some('z')) => Some('g'), _ => unreachable!(), }, - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None, - InlineAsmRegClass::X86( + X86(X86InlineAsmRegClass::kreg) => None, + X86( X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::kreg0 | X86InlineAsmRegClass::tmm_reg, - ) => { - unreachable!("clobber-only") - } - InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None, - InlineAsmRegClass::Bpf(_) => None, - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) - | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) - | InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier { + ) => unreachable!("clobber-only"), + Wasm(WasmInlineAsmRegClass::local) => None, + Bpf(_) => None, + Avr(AvrInlineAsmRegClass::reg_pair) + | Avr(AvrInlineAsmRegClass::reg_iw) + | Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier { Some('h') => Some('B'), Some('l') => Some('A'), _ => None, }, - InlineAsmRegClass::Avr(_) => None, - InlineAsmRegClass::S390x(_) => None, - InlineAsmRegClass::Msp430(_) => None, - InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { - bug!("LLVM backend does not support SPIR-V") - } - InlineAsmRegClass::M68k(_) => None, - InlineAsmRegClass::CSKY(_) => None, - InlineAsmRegClass::Err => unreachable!(), + Avr(_) => None, + S390x(_) => None, + Msp430(_) => None, + SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"), + M68k(_) => None, + CSKY(_) => None, + Err => unreachable!(), } } /// Type to use for outputs that are discarded. It doesn't really matter what /// the type is, as long as it is valid for the constraint code. fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type { + use InlineAsmRegClass::*; match reg { - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) - | InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => { + AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(), + AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => { cx.type_vector(cx.type_i64(), 2) } - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => { + AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"), + Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(), + Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(), + Arm(ArmInlineAsmRegClass::dreg) + | Arm(ArmInlineAsmRegClass::dreg_low16) + | Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(), + Arm(ArmInlineAsmRegClass::qreg) + | Arm(ArmInlineAsmRegClass::qreg_low8) + | Arm(ArmInlineAsmRegClass::qreg_low4) => cx.type_vector(cx.type_i64(), 2), + Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(), + LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(), + LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(), + Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(), + Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(), + Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(), + Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(), + Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(), + PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(), + PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(), + PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(), + PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => { 
unreachable!("clobber-only") } - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(), - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) - | InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => { - cx.type_vector(cx.type_i64(), 2) - } - InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(), - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(), - InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(), - InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr) - | InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => { - unreachable!("clobber-only") - } - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(), - InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => { - unreachable!("clobber-only") - } - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) - | InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(), - InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(), - InlineAsmRegClass::X86( + RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(), + RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(), + RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"), + X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(), + X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(), + X86(X86InlineAsmRegClass::xmm_reg) + | X86(X86InlineAsmRegClass::ymm_reg) + | X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(), + X86(X86InlineAsmRegClass::kreg) => cx.type_i16(), + X86( X86InlineAsmRegClass::x87_reg | X86InlineAsmRegClass::mmx_reg | X86InlineAsmRegClass::kreg0 | X86InlineAsmRegClass::tmm_reg, - ) => { - unreachable!("clobber-only") - } - InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(), - InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(), - InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(), - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(), - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(), - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(), - 
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(), - InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(), - InlineAsmRegClass::S390x( - S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr, - ) => cx.type_i32(), - InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(), - InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(), - InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(), - InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(), - InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(), - InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(), - InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => { - bug!("LLVM backend does not support SPIR-V") - } - InlineAsmRegClass::Err => unreachable!(), + ) => unreachable!("clobber-only"), + Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(), + Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(), + Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(), + Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(), + Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(), + Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(), + Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(), + Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(), + S390x(S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr) => cx.type_i32(), + S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(), + Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(), + M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(), + M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(), + M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(), + CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(), + CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(), + SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"), + Err => unreachable!(), } } @@ -912,8 +881,10 @@ fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Ty Primitive::Int(Integer::I16, _) => cx.type_i16(), Primitive::Int(Integer::I32, _) => cx.type_i32(), Primitive::Int(Integer::I64, _) => cx.type_i64(), + Primitive::Float(Float::F16) => cx.type_f16(), Primitive::Float(Float::F32) => cx.type_f32(), Primitive::Float(Float::F64) => cx.type_f64(), + Primitive::Float(Float::F128) => cx.type_f128(), // FIXME(erikdesjardins): handle non-default addrspace ptr sizes Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()), _ => unreachable!(), @@ -937,9 +908,10 @@ fn llvm_fixup_input<'ll, 'tcx>( layout: &TyAndLayout<'tcx>, instance: Instance<'_>, ) -> &'ll Value { + use InlineAsmRegClass::*; let dl = &bx.tcx.data_layout; match (reg, layout.abi) { - (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8); bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) @@ -947,7 +919,9 @@ fn llvm_fixup_input<'ll, 'tcx>( value } } - (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => { + (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + if s.primitive() != Primitive::Float(Float::F128) => + { let elem_ty = llvm_asm_scalar_type(bx.cx, s); let count = 16 / layout.size.bytes(); let vec_ty = bx.cx.type_vector(elem_ty, count); @@ -958,26 
+932,25 @@ fn llvm_fixup_input<'ll, 'tcx>( } bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) } - ( - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), - Abi::Vector { element, count }, - ) if layout.size.bytes() == 8 => { + (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + if layout.size.bytes() == 8 => + { let elem_ty = llvm_asm_scalar_type(bx.cx, element); let vec_ty = bx.cx.type_vector(elem_ty, count); let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } - (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { bx.bitcast(value, bx.cx.type_i64()) } ( - InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), + X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), Abi::Vector { .. }, ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)), ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -989,7 +962,7 @@ fn llvm_fixup_input<'ll, 'tcx>( bx.bitcast(value, bx.type_vector(bx.type_i32(), 4)) } ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1004,7 +977,7 @@ fn llvm_fixup_input<'ll, 'tcx>( bx.bitcast(value, bx.type_vector(bx.type_i16(), 8)) } ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1013,10 +986,7 @@ fn llvm_fixup_input<'ll, 'tcx>( ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } - ( - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), - Abi::Scalar(s), - ) => { + (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { bx.bitcast(value, bx.cx.type_f32()) } else { @@ -1024,7 +994,7 @@ fn llvm_fixup_input<'ll, 'tcx>( } } ( - InlineAsmRegClass::Arm( + Arm( ArmInlineAsmRegClass::dreg | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, @@ -1038,7 +1008,7 @@ fn llvm_fixup_input<'ll, 'tcx>( } } ( - InlineAsmRegClass::Arm( + Arm( ArmInlineAsmRegClass::dreg | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16 @@ -1050,7 +1020,7 @@ fn llvm_fixup_input<'ll, 'tcx>( ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } - (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. 
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()), @@ -1059,7 +1029,7 @@ fn llvm_fixup_input<'ll, 'tcx>( _ => value, } } - (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) => { @@ -1081,41 +1051,43 @@ fn llvm_fixup_output<'ll, 'tcx>( layout: &TyAndLayout<'tcx>, instance: Instance<'_>, ) -> &'ll Value { + use InlineAsmRegClass::*; match (reg, layout.abi) { - (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { bx.extract_element(value, bx.const_i32(0)) } else { value } } - (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => { + (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + if s.primitive() != Primitive::Float(Float::F128) => + { value = bx.extract_element(value, bx.const_i32(0)); if let Primitive::Pointer(_) = s.primitive() { value = bx.inttoptr(value, layout.llvm_type(bx.cx)); } value } - ( - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), - Abi::Vector { element, count }, - ) if layout.size.bytes() == 8 => { + (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + if layout.size.bytes() == 8 => + { let elem_ty = llvm_asm_scalar_type(bx.cx, element); let vec_ty = bx.cx.type_vector(elem_ty, count * 2); let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } - (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { bx.bitcast(value, bx.cx.type_f64()) } ( - InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), + X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), Abi::Vector { .. 
}, ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)), ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1127,7 +1099,7 @@ fn llvm_fixup_output<'ll, 'tcx>( bx.bitcast(value, bx.type_f128()) } ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1138,7 +1110,7 @@ fn llvm_fixup_output<'ll, 'tcx>( bx.extract_element(value, bx.const_usize(0)) } ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1147,10 +1119,7 @@ fn llvm_fixup_output<'ll, 'tcx>( ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } - ( - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), - Abi::Scalar(s), - ) => { + (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { bx.bitcast(value, bx.cx.type_i32()) } else { @@ -1158,7 +1127,7 @@ fn llvm_fixup_output<'ll, 'tcx>( } } ( - InlineAsmRegClass::Arm( + Arm( ArmInlineAsmRegClass::dreg | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, @@ -1172,7 +1141,7 @@ fn llvm_fixup_output<'ll, 'tcx>( } } ( - InlineAsmRegClass::Arm( + Arm( ArmInlineAsmRegClass::dreg | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16 @@ -1184,7 +1153,7 @@ fn llvm_fixup_output<'ll, 'tcx>( ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } - (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. 
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()), @@ -1194,7 +1163,7 @@ fn llvm_fixup_output<'ll, 'tcx>( _ => value, } } - (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) => { @@ -1213,37 +1182,39 @@ fn llvm_fixup_output_type<'ll, 'tcx>( layout: &TyAndLayout<'tcx>, instance: Instance<'_>, ) -> &'ll Type { + use InlineAsmRegClass::*; match (reg, layout.abi) { - (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { cx.type_vector(cx.type_i8(), 8) } else { layout.llvm_type(cx) } } - (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => { + (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + if s.primitive() != Primitive::Float(Float::F128) => + { let elem_ty = llvm_asm_scalar_type(cx, s); let count = 16 / layout.size.bytes(); cx.type_vector(elem_ty, count) } - ( - InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), - Abi::Vector { element, count }, - ) if layout.size.bytes() == 8 => { + (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + if layout.size.bytes() == 8 => + { let elem_ty = llvm_asm_scalar_type(cx, element); cx.type_vector(elem_ty, count * 2) } - (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { cx.type_i64() } ( - InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), + X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), Abi::Vector { .. 
}, ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8), ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1255,7 +1226,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( cx.type_vector(cx.type_i32(), 4) } ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1263,7 +1234,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( Abi::Scalar(s), ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8), ( - InlineAsmRegClass::X86( + X86( X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, @@ -1272,10 +1243,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } - ( - InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), - Abi::Scalar(s), - ) => { + (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { cx.type_f32() } else { @@ -1283,7 +1251,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( } } ( - InlineAsmRegClass::Arm( + Arm( ArmInlineAsmRegClass::dreg | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, @@ -1297,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( } } ( - InlineAsmRegClass::Arm( + Arm( ArmInlineAsmRegClass::dreg | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16 @@ -1309,7 +1277,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } - (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(), @@ -1318,7 +1286,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( _ => layout.llvm_type(cx), } } - (InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) => { diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index 3877460fcdb..6df63eec513 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -1,31 +1,29 @@ //! Set and unset common attributes on LLVM values. 
+use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr}; use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, PatchableFunctionEntry}; use rustc_middle::ty::{self, TyCtxt}; -use rustc_session::config::{FunctionReturn, OptLevel}; +use rustc_session::config::{BranchProtection, FunctionReturn, OptLevel, PAuthKey, PacRet}; use rustc_span::symbol::sym; use rustc_target::spec::{FramePointer, SanitizerSet, StackProbeType, StackProtector}; use smallvec::SmallVec; -use crate::attributes; +use crate::context::CodegenCx; use crate::errors::{MissingFeatures, SanitizerMemtagRequiresMte, TargetFeatureDisableOrEnable}; use crate::llvm::AttributePlace::Function; use crate::llvm::{self, AllocKindFlags, Attribute, AttributeKind, AttributePlace, MemoryEffects}; -use crate::llvm_util; -pub use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr}; - -use crate::context::CodegenCx; use crate::value::Value; +use crate::{attributes, llvm_util}; -pub fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) { +pub(crate) fn apply_to_llfn(llfn: &Value, idx: AttributePlace, attrs: &[&Attribute]) { if !attrs.is_empty() { llvm::AddFunctionAttributes(llfn, idx, attrs); } } -pub fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) { +pub(crate) fn apply_to_callsite(callsite: &Value, idx: AttributePlace, attrs: &[&Attribute]) { if !attrs.is_empty() { llvm::AddCallSiteAttributes(callsite, idx, attrs); } @@ -82,7 +80,7 @@ fn patchable_function_entry_attrs<'ll>( /// Get LLVM sanitize attributes. #[inline] -pub fn sanitize_attrs<'ll>( +pub(crate) fn sanitize_attrs<'ll>( cx: &CodegenCx<'ll, '_>, no_sanitize: SanitizerSet, ) -> SmallVec<[&'ll Attribute; 4]> { @@ -122,7 +120,7 @@ pub fn sanitize_attrs<'ll>( /// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function. #[inline] -pub fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute { +pub(crate) fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Attribute { // NOTE: We should determine if we even need async unwind tables, as they // take have more overhead and if we can use sync unwind tables we // probably should. @@ -130,7 +128,7 @@ pub fn uwtable_attr(llcx: &llvm::Context, use_sync_unwind: Option<bool>) -> &Att llvm::CreateUWTableAttr(llcx, async_unwind) } -pub fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { +pub(crate) fn frame_pointer_type_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { let mut fp = cx.sess().target.frame_pointer; let opts = &cx.sess().opts; // "mcount" function relies on stack pointer. 
@@ -282,19 +280,19 @@ fn backchain_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { if found_positive { Some(llvm::CreateAttrString(cx.llcx, "backchain")) } else { None } } -pub fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute { +pub(crate) fn target_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Attribute { let target_cpu = llvm_util::target_cpu(cx.tcx.sess); llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu) } -pub fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { +pub(crate) fn tune_cpu_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { llvm_util::tune_cpu(cx.tcx.sess) .map(|tune_cpu| llvm::CreateAttrStringValue(cx.llcx, "tune-cpu", tune_cpu)) } /// Get the `NonLazyBind` LLVM attribute, /// if the codegen options allow skipping the PLT. -pub fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { +pub(crate) fn non_lazy_bind_attr<'ll>(cx: &CodegenCx<'ll, '_>) -> Option<&'ll Attribute> { // Don't generate calls through PLT if it's not necessary if !cx.sess().needs_plt() { Some(AttributeKind::NonLazyBind.create_attr(cx.llcx)) @@ -326,9 +324,10 @@ fn create_alloc_family_attr(llcx: &llvm::Context) -> &llvm::Attribute { llvm::CreateAttrStringValue(llcx, "alloc-family", "__rust_alloc") } +/// Helper for `FnAbi::apply_attrs_llfn`: /// Composite function which sets LLVM attributes for function depending on its AST (`#[attribute]`) /// attributes. -pub fn from_fn_attrs<'ll, 'tcx>( +pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty::Instance<'tcx>, @@ -404,11 +403,42 @@ pub fn from_fn_attrs<'ll, 'tcx>( if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { to_add.push(AttributeKind::Naked.create_attr(cx.llcx)); // HACK(jubilee): "indirect branch tracking" works by attaching prologues to functions. - // And it is a module-level attribute, so the alternative is pulling naked functions into new LLVM modules. - // Otherwise LLVM's "naked" functions come with endbr prefixes per https://github.com/rust-lang/rust/issues/98768 + // And it is a module-level attribute, so the alternative is pulling naked functions into + // new LLVM modules. Otherwise LLVM's "naked" functions come with endbr prefixes per + // https://github.com/rust-lang/rust/issues/98768 to_add.push(AttributeKind::NoCfCheck.create_attr(cx.llcx)); - // Need this for AArch64. - to_add.push(llvm::CreateAttrStringValue(cx.llcx, "branch-target-enforcement", "false")); + if llvm_util::get_version() < (19, 0, 0) { + // Prior to LLVM 19, branch-target-enforcement was disabled by setting the attribute to + // the string "false". Now it is disabled by absence of the attribute. + to_add.push(llvm::CreateAttrStringValue(cx.llcx, "branch-target-enforcement", "false")); + } + } else { + // Do not set sanitizer attributes for naked functions. + to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize)); + + if llvm_util::get_version() >= (19, 0, 0) { + // For non-naked functions, set branch protection attributes on aarch64. 
+ if let Some(BranchProtection { bti, pac_ret }) = + cx.sess().opts.unstable_opts.branch_protection + { + assert!(cx.sess().target.arch == "aarch64"); + if bti { + to_add.push(llvm::CreateAttrString(cx.llcx, "branch-target-enforcement")); + } + if let Some(PacRet { leaf, key }) = pac_ret { + to_add.push(llvm::CreateAttrStringValue( + cx.llcx, + "sign-return-address", + if leaf { "all" } else { "non-leaf" }, + )); + to_add.push(llvm::CreateAttrStringValue( + cx.llcx, + "sign-return-address-key", + if key == PAuthKey::A { "a_key" } else { "b_key" }, + )); + } + } + } } if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR_ZEROED) @@ -425,7 +455,8 @@ pub fn from_fn_attrs<'ll, 'tcx>( flags |= AllocKindFlags::Zeroed; } to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags)); - // apply to return place instead of function (unlike all other attributes applied in this function) + // apply to return place instead of function (unlike all other attributes applied in this + // function) let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx); attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]); } @@ -461,7 +492,6 @@ pub fn from_fn_attrs<'ll, 'tcx>( if let Some(backchain) = backchain_attr(cx) { to_add.push(backchain); } - to_add.extend(sanitize_attrs(cx, codegen_fn_attrs.no_sanitize)); to_add.extend(patchable_function_entry_attrs(cx, codegen_fn_attrs.patchable_function_entry)); // Always annotate functions with the target-cpu they are compiled for. @@ -473,7 +503,7 @@ pub fn from_fn_attrs<'ll, 'tcx>( to_add.extend(tune_cpu_attr(cx)); let function_features = - codegen_fn_attrs.target_features.iter().map(|f| f.as_str()).collect::<Vec<&str>>(); + codegen_fn_attrs.target_features.iter().map(|f| f.name.as_str()).collect::<Vec<&str>>(); if let Some(f) = llvm_util::check_tied_features( cx.tcx.sess, @@ -497,13 +527,20 @@ pub fn from_fn_attrs<'ll, 'tcx>( let function_features = function_features .iter() - .flat_map(|feat| { - llvm_util::to_llvm_features(cx.tcx.sess, feat).into_iter().map(|f| format!("+{f}")) - }) + // Convert to LLVMFeatures and filter out unavailable ones + .flat_map(|feat| llvm_util::to_llvm_features(cx.tcx.sess, feat)) + // Convert LLVMFeatures & dependencies to +<feats>s + .flat_map(|feat| feat.into_iter().map(|f| format!("+{f}"))) .chain(codegen_fn_attrs.instruction_set.iter().map(|x| match x { InstructionSetAttr::ArmA32 => "-thumb-mode".to_string(), InstructionSetAttr::ArmT32 => "+thumb-mode".to_string(), })) + // HACK: LLVM versions 19+ do not have the FPMR feature and treat it as always enabled + // It only exists as a feature in LLVM 18, cannot be passed down for any other version + .chain(match &*cx.tcx.sess.target.arch { + "aarch64" if llvm_util::get_version().0 == 18 => vec!["+fpmr".to_string()], + _ => vec![], + }) .collect::<Vec<String>>(); if cx.tcx.sess.target.is_like_wasm { diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index f46c6b1c498..4f2c83634a8 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -1,28 +1,18 @@ //! 
A helper class for dealing with static archives -use std::env; -use std::ffi::{c_char, c_void, CStr, CString, OsString}; -use std::io; -use std::mem; +use std::ffi::{c_char, c_void, CStr, CString}; use std::path::{Path, PathBuf}; -use std::ptr; -use std::str; +use std::{io, mem, ptr, str}; -use crate::common; -use crate::errors::{ - DlltoolFailImportLibrary, ErrorCallingDllTool, ErrorCreatingImportLibrary, ErrorWritingDEFFile, -}; -use crate::llvm::archive_ro::{ArchiveRO, Child}; -use crate::llvm::{self, ArchiveKind, LLVMMachineType, LLVMRustCOFFShortExport}; use rustc_codegen_ssa::back::archive::{ try_extract_macho_fat_archive, ArArchiveBuilder, ArchiveBuildFailure, ArchiveBuilder, ArchiveBuilderBuilder, ObjectReader, UnknownArchiveKind, DEFAULT_OBJECT_READER, }; -use tracing::trace; - -use rustc_session::cstore::DllImport; use rustc_session::Session; +use crate::llvm::archive_ro::{ArchiveRO, Child}; +use crate::llvm::{self, ArchiveKind}; + /// Helper for adding many files to an archive. #[must_use = "must call build() to finish building the archive"] pub(crate) struct LlvmArchiveBuilder<'a> { @@ -50,18 +40,6 @@ fn is_relevant_child(c: &Child<'_>) -> bool { } } -/// Map machine type strings to values of LLVM's MachineTypes enum. -fn llvm_machine_type(cpu: &str) -> LLVMMachineType { - match cpu { - "x86_64" => LLVMMachineType::AMD64, - "x86" => LLVMMachineType::I386, - "aarch64" => LLVMMachineType::ARM64, - "arm64ec" => LLVMMachineType::ARM64EC, - "arm" => LLVMMachineType::ARM, - _ => panic!("unsupported cpu type {cpu}"), - } -} - impl<'a> ArchiveBuilder for LlvmArchiveBuilder<'a> { fn add_archive( &mut self, @@ -101,192 +79,27 @@ impl<'a> ArchiveBuilder for LlvmArchiveBuilder<'a> { fn build(mut self: Box<Self>, output: &Path) -> bool { match self.build_with_llvm(output) { Ok(any_members) => any_members, - Err(e) => self.sess.dcx().emit_fatal(ArchiveBuildFailure { error: e }), + Err(error) => { + self.sess.dcx().emit_fatal(ArchiveBuildFailure { path: output.to_owned(), error }) + } } } } -pub struct LlvmArchiveBuilderBuilder; +pub(crate) struct LlvmArchiveBuilderBuilder; impl ArchiveBuilderBuilder for LlvmArchiveBuilderBuilder { fn new_archive_builder<'a>(&self, sess: &'a Session) -> Box<dyn ArchiveBuilder + 'a> { - // FIXME use ArArchiveBuilder on most targets again once reading thin archives is - // implemented - if true { + // Keeping LlvmArchiveBuilder around in case of a regression caused by using + // ArArchiveBuilder. + // FIXME(#128955) remove a couple of months after #128936 gets merged in case + // no regression is found. 
+ if false { Box::new(LlvmArchiveBuilder { sess, additions: Vec::new() }) } else { Box::new(ArArchiveBuilder::new(sess, &LLVM_OBJECT_READER)) } } - - fn create_dll_import_lib( - &self, - sess: &Session, - lib_name: &str, - dll_imports: &[DllImport], - tmpdir: &Path, - is_direct_dependency: bool, - ) -> PathBuf { - let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" }; - let output_path = tmpdir.join(format!("{lib_name}{name_suffix}.lib")); - - let target = &sess.target; - let mingw_gnu_toolchain = common::is_mingw_gnu_toolchain(target); - - let import_name_and_ordinal_vector: Vec<(String, Option<u16>)> = dll_imports - .iter() - .map(|import: &DllImport| { - if sess.target.arch == "x86" { - ( - common::i686_decorated_name(import, mingw_gnu_toolchain, false), - import.ordinal(), - ) - } else { - (import.name.to_string(), import.ordinal()) - } - }) - .collect(); - - if mingw_gnu_toolchain { - // The binutils linker used on -windows-gnu targets cannot read the import - // libraries generated by LLVM: in our attempts, the linker produced an .EXE - // that loaded but crashed with an AV upon calling one of the imported - // functions. Therefore, use binutils to create the import library instead, - // by writing a .DEF file to the temp dir and calling binutils's dlltool. - let def_file_path = tmpdir.join(format!("{lib_name}{name_suffix}.def")); - - let def_file_content = format!( - "EXPORTS\n{}", - import_name_and_ordinal_vector - .into_iter() - .map(|(name, ordinal)| { - match ordinal { - Some(n) => format!("{name} @{n} NONAME"), - None => name, - } - }) - .collect::<Vec<String>>() - .join("\n") - ); - - match std::fs::write(&def_file_path, def_file_content) { - Ok(_) => {} - Err(e) => { - sess.dcx().emit_fatal(ErrorWritingDEFFile { error: e }); - } - }; - - // --no-leading-underscore: For the `import_name_type` feature to work, we need to be - // able to control the *exact* spelling of each of the symbols that are being imported: - // hence we don't want `dlltool` adding leading underscores automatically. - let dlltool = find_binutils_dlltool(sess); - let temp_prefix = { - let mut path = PathBuf::from(&output_path); - path.pop(); - path.push(lib_name); - path - }; - // dlltool target architecture args from: - // https://github.com/llvm/llvm-project-release-prs/blob/llvmorg-15.0.6/llvm/lib/ToolDrivers/llvm-dlltool/DlltoolDriver.cpp#L69 - let (dlltool_target_arch, dlltool_target_bitness) = match sess.target.arch.as_ref() { - "x86_64" => ("i386:x86-64", "--64"), - "x86" => ("i386", "--32"), - "aarch64" => ("arm64", "--64"), - "arm" => ("arm", "--32"), - _ => panic!("unsupported arch {}", sess.target.arch), - }; - let mut dlltool_cmd = std::process::Command::new(&dlltool); - dlltool_cmd - .arg("-d") - .arg(def_file_path) - .arg("-D") - .arg(lib_name) - .arg("-l") - .arg(&output_path) - .arg("-m") - .arg(dlltool_target_arch) - .arg("-f") - .arg(dlltool_target_bitness) - .arg("--no-leading-underscore") - .arg("--temp-prefix") - .arg(temp_prefix); - - match dlltool_cmd.output() { - Err(e) => { - sess.dcx().emit_fatal(ErrorCallingDllTool { - dlltool_path: dlltool.to_string_lossy(), - error: e, - }); - } - // dlltool returns '0' on failure, so check for error output instead. 
- Ok(output) if !output.stderr.is_empty() => { - sess.dcx().emit_fatal(DlltoolFailImportLibrary { - dlltool_path: dlltool.to_string_lossy(), - dlltool_args: dlltool_cmd - .get_args() - .map(|arg| arg.to_string_lossy()) - .collect::<Vec<_>>() - .join(" "), - stdout: String::from_utf8_lossy(&output.stdout), - stderr: String::from_utf8_lossy(&output.stderr), - }) - } - _ => {} - } - } else { - // we've checked for \0 characters in the library name already - let dll_name_z = CString::new(lib_name).unwrap(); - - let output_path_z = rustc_fs_util::path_to_c_string(&output_path); - - trace!("invoking LLVMRustWriteImportLibrary"); - trace!(" dll_name {:#?}", dll_name_z); - trace!(" output_path {}", output_path.display()); - trace!( - " import names: {}", - dll_imports - .iter() - .map(|import| import.name.to_string()) - .collect::<Vec<_>>() - .join(", "), - ); - - // All import names are Rust identifiers and therefore cannot contain \0 characters. - // FIXME: when support for #[link_name] is implemented, ensure that the import names - // still don't contain any \0 characters. Also need to check that the names don't - // contain substrings like " @" or "NONAME" that are keywords or otherwise reserved - // in definition files. - let cstring_import_name_and_ordinal_vector: Vec<(CString, Option<u16>)> = - import_name_and_ordinal_vector - .into_iter() - .map(|(name, ordinal)| (CString::new(name).unwrap(), ordinal)) - .collect(); - - let ffi_exports: Vec<LLVMRustCOFFShortExport> = cstring_import_name_and_ordinal_vector - .iter() - .map(|(name_z, ordinal)| LLVMRustCOFFShortExport::new(name_z.as_ptr(), *ordinal)) - .collect(); - let result = unsafe { - crate::llvm::LLVMRustWriteImportLibrary( - dll_name_z.as_ptr(), - output_path_z.as_ptr(), - ffi_exports.as_ptr(), - ffi_exports.len(), - llvm_machine_type(&sess.target.arch) as u16, - !sess.target.is_like_msvc, - ) - }; - - if result == crate::llvm::LLVMRustResult::Failure { - sess.dcx().emit_fatal(ErrorCreatingImportLibrary { - lib_name, - error: llvm::last_error().unwrap_or("unknown LLVM error".to_string()), - }); - } - }; - - output_path - } } // The object crate doesn't know how to get symbols for LLVM bitcode and COFF bigobj files. @@ -299,25 +112,11 @@ static LLVM_OBJECT_READER: ObjectReader = ObjectReader { get_xcoff_member_alignment: DEFAULT_OBJECT_READER.get_xcoff_member_alignment, }; -fn should_use_llvm_reader(buf: &[u8]) -> bool { - let is_bitcode = unsafe { llvm::LLVMRustIsBitcode(buf.as_ptr(), buf.len()) }; - - // COFF bigobj file, msvc LTO file or import library. 
See - // https://github.com/llvm/llvm-project/blob/453f27bc9/llvm/lib/BinaryFormat/Magic.cpp#L38-L51 - let is_unsupported_windows_obj_file = buf.get(0..4) == Some(b"\0\0\xFF\xFF"); - - is_bitcode || is_unsupported_windows_obj_file -} - #[deny(unsafe_op_in_unsafe_fn)] fn get_llvm_object_symbols( buf: &[u8], f: &mut dyn FnMut(&[u8]) -> io::Result<()>, ) -> io::Result<bool> { - if !should_use_llvm_reader(buf) { - return (DEFAULT_OBJECT_READER.get_symbols)(buf, f); - } - let mut state = Box::new(f); let err = unsafe { @@ -354,18 +153,10 @@ fn get_llvm_object_symbols( } fn llvm_is_64_bit_object_file(buf: &[u8]) -> bool { - if !should_use_llvm_reader(buf) { - return (DEFAULT_OBJECT_READER.is_64_bit_object_file)(buf); - } - unsafe { llvm::LLVMRustIs64BitSymbolicFile(buf.as_ptr(), buf.len()) } } fn llvm_is_ec_object_file(buf: &[u8]) -> bool { - if !should_use_llvm_reader(buf) { - return (DEFAULT_OBJECT_READER.is_ec_object_file)(buf); - } - unsafe { llvm::LLVMRustIsECObject(buf.as_ptr(), buf.len()) } } @@ -459,39 +250,3 @@ impl<'a> LlvmArchiveBuilder<'a> { fn string_to_io_error(s: String) -> io::Error { io::Error::new(io::ErrorKind::Other, format!("bad archive: {s}")) } - -fn find_binutils_dlltool(sess: &Session) -> OsString { - assert!(sess.target.options.is_like_windows && !sess.target.options.is_like_msvc); - if let Some(dlltool_path) = &sess.opts.cg.dlltool { - return dlltool_path.clone().into_os_string(); - } - - let tool_name: OsString = if sess.host.options.is_like_windows { - // If we're compiling on Windows, always use "dlltool.exe". - "dlltool.exe" - } else { - // On other platforms, use the architecture-specific name. - match sess.target.arch.as_ref() { - "x86_64" => "x86_64-w64-mingw32-dlltool", - "x86" => "i686-w64-mingw32-dlltool", - "aarch64" => "aarch64-w64-mingw32-dlltool", - - // For non-standard architectures (e.g., aarch32) fallback to "dlltool". - _ => "dlltool", - } - } - .into(); - - // NOTE: it's not clear how useful it is to explicitly search PATH. - for dir in env::split_paths(&env::var_os("PATH").unwrap_or_default()) { - let full_path = dir.join(&tool_name); - if full_path.is_file() { - return full_path.into_os_string(); - } - } - - // The user didn't specify the location of the dlltool binary, and we weren't able - // to find the appropriate one on the PATH. Just return the name of the tool - // and let the invocation fail with a hopefully useful error message. 
- tool_name -} diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index aef672631c8..66479ad7f34 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -1,11 +1,11 @@ -use crate::back::write::{ - self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers, -}; -use crate::errors::{ - DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro, -}; -use crate::llvm::{self, build_string}; -use crate::{LlvmCodegenBackend, ModuleLlvm}; +use std::collections::BTreeMap; +use std::ffi::{CStr, CString}; +use std::fs::File; +use std::mem::ManuallyDrop; +use std::path::Path; +use std::sync::Arc; +use std::{io, iter, slice}; + use object::read::archive::ArchiveFile; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared}; use rustc_codegen_ssa::back::symbol_export; @@ -22,21 +22,20 @@ use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel use rustc_session::config::{self, CrateType, Lto}; use tracing::{debug, info}; -use std::collections::BTreeMap; -use std::ffi::{CStr, CString}; -use std::fs::File; -use std::io; -use std::iter; -use std::mem::ManuallyDrop; -use std::path::Path; -use std::slice; -use std::sync::Arc; +use crate::back::write::{ + self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers, +}; +use crate::errors::{ + DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro, +}; +use crate::llvm::{self, build_string}; +use crate::{LlvmCodegenBackend, ModuleLlvm}; /// We keep track of the computed LTO cache keys from the previous /// session to determine which CGUs we can reuse. -pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; +const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; -pub fn crate_type_allows_lto(crate_type: CrateType) -> bool { +fn crate_type_allows_lto(crate_type: CrateType) -> bool { match crate_type { CrateType::Executable | CrateType::Dylib @@ -93,11 +92,9 @@ fn prepare_lto( dcx.emit_err(LtoDylib); return Err(FatalError); } - } else if *crate_type == CrateType::ProcMacro { - if !cgcx.opts.unstable_opts.dylib_lto { - dcx.emit_err(LtoProcMacro); - return Err(FatalError); - } + } else if *crate_type == CrateType::ProcMacro && !cgcx.opts.unstable_opts.dylib_lto { + dcx.emit_err(LtoProcMacro); + return Err(FatalError); } } @@ -159,15 +156,15 @@ fn get_bitcode_slice_from_object_data<'a>( obj: &'a [u8], cgcx: &CodegenContext<LlvmCodegenBackend>, ) -> Result<&'a [u8], LtoBitcodeFromRlib> { - // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that - // won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff - // the relevant magic strings here and return. + // We're about to assume the data here is an object file with sections, but if it's raw LLVM IR + // that won't work. Fortunately, if that's what we have we can just return the object directly, + // so we sniff the relevant magic strings here and return. 
if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") { return Ok(obj); } - // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name" - // which in the public API for sections gets treated as part of the section name, but internally - // in MachOObjectFile.cpp gets treated separately. + // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment + // name" which in the public API for sections gets treated as part of the section name, but + // internally in MachOObjectFile.cpp gets treated separately. let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,"); let mut len = 0; let data = unsafe { @@ -315,7 +312,6 @@ fn fat_lto( } } }; - let mut serialized_bitcode = Vec::new(); { let (llcx, llmod) = { let llvm = &module.module_llvm; @@ -343,9 +339,7 @@ fn fat_lto( serialized_modules.sort_by(|module1, module2| module1.1.cmp(&module2.1)); // For all serialized bitcode files we parse them and link them in as we did - // above, this is all mostly handled in C++. Like above, though, we don't - // know much about the memory management here so we err on the side of being - // save and persist everything with the original module. + // above, this is all mostly handled in C++. let mut linker = Linker::new(llmod); for (bc_decoded, name) in serialized_modules { let _timer = cgcx @@ -356,7 +350,6 @@ fn fat_lto( info!("linking {:?}", name); let data = bc_decoded.data(); linker.add(data).map_err(|()| write::llvm_err(dcx, LlvmError::LoadBitcode { name }))?; - serialized_bitcode.push(bc_decoded); } drop(linker); save_temp_bitcode(cgcx, &module, "lto.input"); @@ -373,7 +366,7 @@ fn fat_lto( } } - Ok(LtoModuleCodegen::Fat { module, _serialized_bitcode: serialized_bitcode }) + Ok(LtoModuleCodegen::Fat(module)) } pub(crate) struct Linker<'a>(&'a mut llvm::Linker<'a>); @@ -617,7 +610,7 @@ pub(crate) fn run_pass_manager( llvm::LLVMRustAddModuleFlagU32( module.module_llvm.llmod(), llvm::LLVMModFlagBehavior::Error, - c"LTOPostLink".as_ptr().cast(), + c"LTOPostLink".as_ptr(), 1, ); } @@ -711,7 +704,7 @@ impl Drop for ThinBuffer { } } -pub unsafe fn optimize_thin_module( +pub(crate) unsafe fn optimize_thin_module( thin_module: ThinModule<LlvmCodegenBackend>, cgcx: &CodegenContext<LlvmCodegenBackend>, ) -> Result<ModuleCodegen<ModuleLlvm>, FatalError> { @@ -807,7 +800,7 @@ pub unsafe fn optimize_thin_module( /// Maps LLVM module identifiers to their corresponding LLVM LTO cache keys #[derive(Debug, Default)] -pub struct ThinLTOKeysMap { +struct ThinLTOKeysMap { // key = llvm name of importing module, value = LLVM cache key keys: BTreeMap<String, String>, } @@ -864,7 +857,7 @@ fn module_name_to_str(c_str: &CStr) -> &str { }) } -pub fn parse_module<'a>( +pub(crate) fn parse_module<'a>( cx: &'a llvm::Context, name: &CStr, data: &[u8], diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs index b72636a6224..76529e0c83b 100644 --- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs +++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs @@ -1,13 +1,12 @@ -use std::{ - ffi::{c_char, CStr}, - marker::PhantomData, - ops::Deref, - ptr::NonNull, -}; +use std::ffi::{c_char, CStr}; +use std::marker::PhantomData; +use std::ops::Deref; +use std::ptr::NonNull; use rustc_data_structures::small_c_str::SmallCStr; -use crate::{errors::LlvmError, llvm}; +use crate::errors::LlvmError; +use crate::llvm; /// Responsible for 
safely creating and disposing llvm::TargetMachine via ffi functions. /// Not cloneable as there is no clone function for llvm::TargetMachine. @@ -31,7 +30,7 @@ impl OwnedTargetMachine { data_sections: bool, unique_section_names: bool, trap_unreachable: bool, - singletree: bool, + singlethread: bool, verbose_asm: bool, emit_stack_size_section: bool, relax_elf_relocations: bool, @@ -63,7 +62,7 @@ impl OwnedTargetMachine { data_sections, unique_section_names, trap_unreachable, - singletree, + singlethread, verbose_asm, emit_stack_size_section, relax_elf_relocations, @@ -87,15 +86,17 @@ impl Deref for OwnedTargetMachine { type Target = llvm::TargetMachine; fn deref(&self) -> &Self::Target { - // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine + // SAFETY: constructing ensures we have a valid pointer created by + // llvm::LLVMRustCreateTargetMachine. unsafe { self.tm_unique.as_ref() } } } impl Drop for OwnedTargetMachine { fn drop(&mut self) { - // SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine - // OwnedTargetMachine is not copyable so there is no double free or use after free + // SAFETY: constructing ensures we have a valid pointer created by + // llvm::LLVMRustCreateTargetMachine OwnedTargetMachine is not copyable so there is no + // double free or use after free. unsafe { llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut()); } diff --git a/compiler/rustc_codegen_llvm/src/back/profiling.rs b/compiler/rustc_codegen_llvm/src/back/profiling.rs index 2eee9f8c5a3..79794775b7b 100644 --- a/compiler/rustc_codegen_llvm/src/back/profiling.rs +++ b/compiler/rustc_codegen_llvm/src/back/profiling.rs @@ -1,9 +1,11 @@ -use measureme::{event_id::SEPARATOR_BYTE, EventId, StringComponent, StringId}; -use rustc_data_structures::profiling::{SelfProfiler, TimingGuard}; use std::ffi::{c_void, CStr}; use std::os::raw::c_char; use std::sync::Arc; +use measureme::event_id::SEPARATOR_BYTE; +use measureme::{EventId, StringComponent, StringId}; +use rustc_data_structures::profiling::{SelfProfiler, TimingGuard}; + fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &str) -> EventId { let pass_name = profiler.get_or_alloc_cached_string(pass_name); let mut components = vec![StringComponent::Ref(pass_name)]; @@ -19,14 +21,14 @@ fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &st EventId::from_label(profiler.alloc_string(components.as_slice())) } -pub struct LlvmSelfProfiler<'a> { +pub(crate) struct LlvmSelfProfiler<'a> { profiler: Arc<SelfProfiler>, stack: Vec<TimingGuard<'a>>, llvm_pass_event_kind: StringId, } impl<'a> LlvmSelfProfiler<'a> { - pub fn new(profiler: Arc<SelfProfiler>) -> Self { + pub(crate) fn new(profiler: Arc<SelfProfiler>) -> Self { let llvm_pass_event_kind = profiler.alloc_string("LLVM Pass"); Self { profiler, stack: Vec::default(), llvm_pass_event_kind } } @@ -41,7 +43,7 @@ impl<'a> LlvmSelfProfiler<'a> { } } -pub unsafe extern "C" fn selfprofile_before_pass_callback( +pub(crate) unsafe extern "C" fn selfprofile_before_pass_callback( llvm_self_profiler: *mut c_void, pass_name: *const c_char, ir_name: *const c_char, @@ -54,7 +56,7 @@ pub unsafe extern "C" fn selfprofile_before_pass_callback( } } -pub unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) { +pub(crate) unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) { let llvm_self_profiler = unsafe { &mut 
*(llvm_self_profiler as *mut LlvmSelfProfiler<'_>) }; llvm_self_profiler.after_pass_callback(); } diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index ddd52e80edf..d2c4ea8171b 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -1,19 +1,10 @@ -use crate::back::lto::ThinBuffer; -use crate::back::owned_target_machine::OwnedTargetMachine; -use crate::back::profiling::{ - selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler, -}; -use crate::base; -use crate::common; -use crate::errors::{ - CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression, - WithLlvmError, WriteBytecode, -}; -use crate::llvm::{self, DiagnosticInfo, PassManager}; -use crate::llvm_util; -use crate::type_::Type; -use crate::LlvmCodegenBackend; -use crate::ModuleLlvm; +use std::ffi::CString; +use std::io::{self, Write}; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use std::{fs, slice, str}; + +use libc::{c_char, c_int, c_void, size_t}; use llvm::{ LLVMRustLLVMHasZlibCompressionForDebugSymbols, LLVMRustLLVMHasZstdCompressionForDebugSymbols, }; @@ -29,32 +20,37 @@ use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{DiagCtxtHandle, FatalError, Level}; use rustc_fs_util::{link_or_copy, path_to_c_string}; use rustc_middle::ty::TyCtxt; -use rustc_session::config::{self, Lto, OutputType, Passes}; -use rustc_session::config::{RemapPathScopeComponents, SplitDwarfKind, SwitchWithOptPath}; +use rustc_session::config::{ + self, Lto, OutputType, Passes, RemapPathScopeComponents, SplitDwarfKind, SwitchWithOptPath, +}; use rustc_session::Session; use rustc_span::symbol::sym; use rustc_span::InnerSpan; use rustc_target::spec::{CodeModel, RelocModel, SanitizerSet, SplitDebuginfo, TlsModel}; use tracing::debug; -use crate::llvm::diagnostic::OptimizationDiagnosticKind; -use libc::{c_char, c_int, c_void, size_t}; -use std::ffi::CString; -use std::fs; -use std::io::{self, Write}; -use std::path::{Path, PathBuf}; -use std::slice; -use std::str; -use std::sync::Arc; +use crate::back::lto::ThinBuffer; +use crate::back::owned_target_machine::OwnedTargetMachine; +use crate::back::profiling::{ + selfprofile_after_pass_callback, selfprofile_before_pass_callback, LlvmSelfProfiler, +}; +use crate::errors::{ + CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression, + WithLlvmError, WriteBytecode, +}; +use crate::llvm::diagnostic::OptimizationDiagnosticKind::*; +use crate::llvm::{self, DiagnosticInfo, PassManager}; +use crate::type_::Type; +use crate::{base, common, llvm_util, LlvmCodegenBackend, ModuleLlvm}; -pub fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> FatalError { +pub(crate) fn llvm_err<'a>(dcx: DiagCtxtHandle<'_>, err: LlvmError<'a>) -> FatalError { match llvm::last_error() { Some(llvm_err) => dcx.emit_almost_fatal(WithLlvmError(err, llvm_err)), None => dcx.emit_almost_fatal(err), } } -pub fn write_output_file<'ll>( +fn write_output_file<'ll>( dcx: DiagCtxtHandle<'_>, target: &'ll llvm::TargetMachine, pm: &llvm::PassManager<'ll>, @@ -99,16 +95,19 @@ pub fn write_output_file<'ll>( } } -pub fn create_informational_target_machine(sess: &Session) -> OwnedTargetMachine { +pub(crate) fn create_informational_target_machine( + sess: &Session, + only_base_features: bool, +) -> OwnedTargetMachine { let config = TargetMachineFactoryConfig { split_dwarf_file: None, output_obj_file: None }; // Can't use query 
system here quite yet because this function is invoked before the query // system/tcx is set up. - let features = llvm_util::global_llvm_features(sess, false); + let features = llvm_util::global_llvm_features(sess, false, only_base_features); target_machine_factory(sess, config::OptLevel::No, &features)(config) .unwrap_or_else(|err| llvm_err(sess.dcx(), err).raise()) } -pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine { +pub(crate) fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMachine { let split_dwarf_file = if tcx.sess.target_can_use_split_dwarf() { tcx.output_filenames(()).split_dwarf_path( tcx.sess.split_debuginfo(), @@ -131,9 +130,7 @@ pub fn create_target_machine(tcx: TyCtxt<'_>, mod_name: &str) -> OwnedTargetMach .unwrap_or_else(|err| llvm_err(tcx.dcx(), err).raise()) } -pub fn to_llvm_opt_settings( - cfg: config::OptLevel, -) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) { +fn to_llvm_opt_settings(cfg: config::OptLevel) -> (llvm::CodeGenOptLevel, llvm::CodeGenOptSize) { use self::config::OptLevel::*; match cfg { No => (llvm::CodeGenOptLevel::None, llvm::CodeGenOptSizeNone), @@ -160,7 +157,8 @@ fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel { match relocation_model { RelocModel::Static => llvm::RelocModel::Static, - // LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra attribute. + // LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra + // attribute. RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC, RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic, RelocModel::Ropi => llvm::RelocModel::ROPI, @@ -180,7 +178,7 @@ pub(crate) fn to_llvm_code_model(code_model: Option<CodeModel>) -> llvm::CodeMod } } -pub fn target_machine_factory( +pub(crate) fn target_machine_factory( sess: &Session, optlvl: config::OptLevel, target_features: &[String], @@ -188,7 +186,13 @@ pub fn target_machine_factory( let reloc_model = to_llvm_relocation_model(sess.relocation_model()); let (opt_level, _) = to_llvm_opt_settings(optlvl); - let use_softfp = sess.opts.cg.soft_float; + let use_softfp = if sess.target.arch == "arm" && sess.target.abi == "eabihf" { + sess.opts.cg.soft_float + } else { + // `validate_commandline_args_with_session_available` has already warned about this being + // ignored. Let's make sure LLVM doesn't suddenly start using this flag on more targets. + false + }; let ffunction_sections = sess.opts.unstable_opts.function_sections.unwrap_or(sess.target.function_sections); @@ -321,7 +325,7 @@ pub(crate) fn save_temp_bitcode( } /// In what context is a dignostic handler being attached to a codegen unit? -pub enum CodegenDiagnosticsStage { +pub(crate) enum CodegenDiagnosticsStage { /// Prelink optimization stage. Opt, /// LTO/ThinLTO postlink optimization stage. 
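The `use_softfp` change in `target_machine_factory` above amounts to: honour `-C soft-float` only on arm eabihf targets and force it off everywhere else, where the flag has already been diagnosed as ignored. A minimal standalone sketch of that predicate, with `arch`, `abi`, and `requested` standing in for the corresponding session fields and flag:

    // Sketch of the gating only; not rustc's actual signature.
    fn use_soft_float(arch: &str, abi: &str, requested: bool) -> bool {
        // Only arm-eabihf still honours -C soft-float; other targets ignore it.
        arch == "arm" && abi == "eabihf" && requested
    }

    fn main() {
        assert!(use_soft_float("arm", "eabihf", true));
        assert!(!use_soft_float("x86_64", "", true));
    }
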
@@ -330,14 +334,14 @@ pub enum CodegenDiagnosticsStage { Codegen, } -pub struct DiagnosticHandlers<'a> { +pub(crate) struct DiagnosticHandlers<'a> { data: *mut (&'a CodegenContext<LlvmCodegenBackend>, DiagCtxtHandle<'a>), llcx: &'a llvm::Context, old_handler: Option<&'a llvm::DiagnosticHandler>, } impl<'a> DiagnosticHandlers<'a> { - pub fn new( + pub(crate) fn new( cgcx: &'a CodegenContext<LlvmCodegenBackend>, dcx: DiagCtxtHandle<'a>, llcx: &'a llvm::Context, @@ -443,13 +447,12 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void column: opt.column, pass_name: &opt.pass_name, kind: match opt.kind { - OptimizationDiagnosticKind::OptimizationRemark => "success", - OptimizationDiagnosticKind::OptimizationMissed - | OptimizationDiagnosticKind::OptimizationFailure => "missed", - OptimizationDiagnosticKind::OptimizationAnalysis - | OptimizationDiagnosticKind::OptimizationAnalysisFPCommute - | OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis", - OptimizationDiagnosticKind::OptimizationRemarkOther => "other", + OptimizationRemark => "success", + OptimizationMissed | OptimizationFailure => "missed", + OptimizationAnalysis + | OptimizationAnalysisFPCommute + | OptimizationAnalysisAliasing => "analysis", + OptimizationRemarkOther => "other", }, message: &opt.message, }); @@ -574,6 +577,7 @@ pub(crate) unsafe fn llvm_optimize( cgcx.opts.cg.linker_plugin_lto.enabled(), config.no_prepopulate_passes, config.verify_llvm_ir, + config.lint_llvm_ir, using_thin_buffers, config.merge_functions, unroll_loops, @@ -941,11 +945,12 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data: } fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool { - cgcx.opts.target_triple.triple().contains("-ios") - || cgcx.opts.target_triple.triple().contains("-darwin") - || cgcx.opts.target_triple.triple().contains("-tvos") - || cgcx.opts.target_triple.triple().contains("-watchos") - || cgcx.opts.target_triple.triple().contains("-visionos") + let triple = cgcx.opts.target_triple.triple(); + triple.contains("-ios") + || triple.contains("-darwin") + || triple.contains("-tvos") + || triple.contains("-watchos") + || triple.contains("-visionos") } fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool { @@ -1032,7 +1037,7 @@ unsafe fn embed_bitcode( let llglobal = llvm::LLVMAddGlobal( llmod, common::val_ty(llconst), - c"rustc.embedded.module".as_ptr().cast(), + c"rustc.embedded.module".as_ptr(), ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -1045,7 +1050,7 @@ unsafe fn embed_bitcode( let llglobal = llvm::LLVMAddGlobal( llmod, common::val_ty(llconst), - c"rustc.embedded.cmdline".as_ptr().cast(), + c"rustc.embedded.cmdline".as_ptr(), ); llvm::LLVMSetInitializer(llglobal, llconst); let section = if is_apple { @@ -1055,7 +1060,7 @@ unsafe fn embed_bitcode( } else { c".llvmcmd" }; - llvm::LLVMSetSection(llglobal, section.as_ptr().cast()); + llvm::LLVMSetSection(llglobal, section.as_ptr()); llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); } else { // We need custom section flags, so emit module-level inline assembly. 
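The repeated `c"...".as_ptr()` cleanups in the embed-bitcode hunks above work because a C-string literal is a `&CStr`, whose `as_ptr` already returns `*const c_char`, so the trailing `.cast()` was redundant. A small standalone illustration (the callee here is hypothetical, standing in for FFI functions such as `LLVMAddGlobal` that take a `*const c_char` name):

    use std::ffi::{c_char, CStr};

    // Hypothetical FFI-style callee taking a C string pointer.
    fn takes_name(_name: *const c_char) {}

    fn main() {
        // c"..." literals (Rust 1.77+) are &CStr; as_ptr() is *const c_char already,
        // so no extra .cast() is needed.
        let section: &CStr = c"rustc.embedded.module";
        takes_name(section.as_ptr());
    }
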
@@ -1108,7 +1113,7 @@ fn create_msvc_imps( .collect::<Vec<_>>(); for (imp_name, val) in globals { - let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast()); + let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr()); llvm::LLVMSetInitializer(imp, val); llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage); } diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index 5dc271ccddb..0ba8d82406a 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -11,13 +11,7 @@ //! [`Ty`]: rustc_middle::ty::Ty //! [`val_ty`]: crate::common::val_ty -use super::ModuleLlvm; - -use crate::attributes; -use crate::builder::Builder; -use crate::context::CodegenCx; -use crate::llvm; -use crate::value::Value; +use std::time::Instant; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; @@ -32,9 +26,13 @@ use rustc_session::config::DebugInfo; use rustc_span::symbol::Symbol; use rustc_target::spec::SanitizerSet; -use std::time::Instant; +use super::ModuleLlvm; +use crate::builder::Builder; +use crate::context::CodegenCx; +use crate::value::Value; +use crate::{attributes, llvm}; -pub struct ValueIter<'ll> { +pub(crate) struct ValueIter<'ll> { cur: Option<&'ll Value>, step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>, } @@ -51,11 +49,14 @@ impl<'ll> Iterator for ValueIter<'ll> { } } -pub fn iter_globals(llmod: &llvm::Module) -> ValueIter<'_> { +pub(crate) fn iter_globals(llmod: &llvm::Module) -> ValueIter<'_> { unsafe { ValueIter { cur: llvm::LLVMGetFirstGlobal(llmod), step: llvm::LLVMGetNextGlobal } } } -pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen<ModuleLlvm>, u64) { +pub(crate) fn compile_codegen_unit( + tcx: TyCtxt<'_>, + cgu_name: Symbol, +) -> (ModuleCodegen<ModuleLlvm>, u64) { let start_time = Instant::now(); let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx); @@ -142,7 +143,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen (module, cost) } -pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { +pub(crate) fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { let Some(sect) = attrs.link_section else { return }; unsafe { let buf = SmallCStr::new(sect.as_str()); @@ -150,7 +151,7 @@ pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { } } -pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { +pub(crate) fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { match linkage { Linkage::External => llvm::Linkage::ExternalLinkage, Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage, @@ -166,7 +167,7 @@ pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { } } -pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { +pub(crate) fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { match linkage { Visibility::Default => llvm::Visibility::Default, Visibility::Hidden => llvm::Visibility::Hidden, diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 72ff9ea118e..6ffe90997f5 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -1,12 +1,7 @@ -use crate::abi::FnAbiLlvmExt; -use crate::attributes; -use crate::common::Funclet; -use crate::context::CodegenCx; -use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True}; -use crate::llvm_util; -use 
crate::type_::Type; -use crate::type_of::LayoutLlvmExt; -use crate::value::Value; +use std::borrow::Cow; +use std::ops::Deref; +use std::{iter, ptr}; + use libc::{c_char, c_uint}; use rustc_codegen_ssa::common::{IntPredicate, RealPredicate, SynchronizationScope, TypeKind}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; @@ -17,24 +12,31 @@ use rustc_data_structures::small_c_str::SmallCStr; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{ - FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout, + FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers, + TyAndLayout, }; use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; use rustc_sanitizers::{cfi, kcfi}; use rustc_session::config::OptLevel; use rustc_span::Span; -use rustc_target::abi::{self, call::FnAbi, Align, Size, WrappingRange}; +use rustc_target::abi::call::FnAbi; +use rustc_target::abi::{self, Align, Size, WrappingRange}; use rustc_target::spec::{HasTargetSpec, SanitizerSet, Target}; use smallvec::SmallVec; -use std::borrow::Cow; -use std::iter; -use std::ops::Deref; -use std::ptr; use tracing::{debug, instrument}; +use crate::abi::FnAbiLlvmExt; +use crate::attributes; +use crate::common::Funclet; +use crate::context::CodegenCx; +use crate::llvm::{self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, True}; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; + // All Builders must have an llfn associated with them #[must_use] -pub struct Builder<'a, 'll, 'tcx> { +pub(crate) struct Builder<'a, 'll, 'tcx> { pub llbuilder: &'ll mut llvm::Builder<'ll>, pub cx: &'a CodegenCx<'ll, 'tcx>, } @@ -91,8 +93,6 @@ impl HasTargetSpec for Builder<'_, '_, '_> { } impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { - type LayoutOfResult = TyAndLayout<'tcx>; - #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { self.cx.handle_layout_err(err, span, ty) @@ -100,8 +100,6 @@ impl<'tcx> LayoutOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { } impl<'tcx> FnAbiOfHelpers<'tcx> for Builder<'_, '_, 'tcx> { - type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>; - #[inline] fn handle_fn_abi_err( &self, @@ -122,11 +120,7 @@ impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> { } } -impl<'ll, 'tcx> HasCodegen<'tcx> for Builder<'_, 'll, 'tcx> { - type CodegenCx = CodegenCx<'ll, 'tcx>; -} - -macro_rules! builder_methods_for_value_instructions { +macro_rules! math_builder_methods { ($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => { $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value { unsafe { @@ -136,7 +130,21 @@ macro_rules! builder_methods_for_value_instructions { } } +macro_rules! set_math_builder_methods { + ($($name:ident($($arg:ident),*) => ($llvm_capi:ident, $llvm_set_math:ident)),+ $(,)?) => { + $(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value { + unsafe { + let instr = llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED); + llvm::$llvm_set_math(instr); + instr + } + })+ + } +} + impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { + type CodegenCx = CodegenCx<'ll, 'tcx>; + fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Self { let bx = Builder::with_cx(cx); unsafe { @@ -271,7 +279,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } } - builder_methods_for_value_instructions! { + math_builder_methods! 
{ add(a, b) => LLVMBuildAdd, fadd(a, b) => LLVMBuildFAdd, sub(a, b) => LLVMBuildSub, @@ -303,84 +311,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { unchecked_umul(x, y) => LLVMBuildNUWMul, } - fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetFastMath(instr); - instr - } - } - - fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetFastMath(instr); - instr - } - } - - fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetFastMath(instr); - instr - } - } - - fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetFastMath(instr); - instr - } - } - - fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetFastMath(instr); - instr - } - } - - fn fadd_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetAlgebraicMath(instr); - instr - } - } - - fn fsub_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetAlgebraicMath(instr); - instr - } - } - - fn fmul_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetAlgebraicMath(instr); - instr - } - } - - fn fdiv_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetAlgebraicMath(instr); - instr - } - } - - fn frem_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { - unsafe { - let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED); - llvm::LLVMRustSetAlgebraicMath(instr); - instr - } + set_math_builder_methods! 
{ + fadd_fast(x, y) => (LLVMBuildFAdd, LLVMRustSetFastMath), + fsub_fast(x, y) => (LLVMBuildFSub, LLVMRustSetFastMath), + fmul_fast(x, y) => (LLVMBuildFMul, LLVMRustSetFastMath), + fdiv_fast(x, y) => (LLVMBuildFDiv, LLVMRustSetFastMath), + frem_fast(x, y) => (LLVMBuildFRem, LLVMRustSetFastMath), + fadd_algebraic(x, y) => (LLVMBuildFAdd, LLVMRustSetAlgebraicMath), + fsub_algebraic(x, y) => (LLVMBuildFSub, LLVMRustSetAlgebraicMath), + fmul_algebraic(x, y) => (LLVMBuildFMul, LLVMRustSetAlgebraicMath), + fdiv_algebraic(x, y) => (LLVMBuildFDiv, LLVMRustSetAlgebraicMath), + frem_algebraic(x, y) => (LLVMBuildFRem, LLVMRustSetAlgebraicMath), } fn checked_binop( @@ -390,8 +331,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { lhs: Self::Value, rhs: Self::Value, ) -> (Self::Value, Self::Value) { + use rustc_middle::ty::IntTy::*; + use rustc_middle::ty::UintTy::*; use rustc_middle::ty::{Int, Uint}; - use rustc_middle::ty::{IntTy::*, UintTy::*}; let new_kind = match ty.kind() { Int(t @ Isize) => Int(t.normalize(self.tcx.sess.target.pointer_width)), @@ -462,6 +404,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { val } } + fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value { if scalar.is_bool() { return self.trunc(val, self.cx().type_i1()); @@ -529,7 +472,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { #[instrument(level = "trace", skip(self))] fn load_operand(&mut self, place: PlaceRef<'tcx, &'ll Value>) -> OperandRef<'tcx, &'ll Value> { if place.layout.is_unsized() { - let tail = self.tcx.struct_tail_with_normalize(place.layout.ty, |ty| ty, || {}); + let tail = self.tcx.struct_tail_for_codegen(place.layout.ty, self.param_env()); if matches!(tail.kind(), ty::Foreign(..)) { // Unsized locals and, at least conceptually, even unsized arguments must be copied // around, which requires dynamically determining their size. Therefore, we cannot @@ -725,13 +668,32 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { llvm::LLVMSetVolatile(store, llvm::True); } if flags.contains(MemFlags::NONTEMPORAL) { - // According to LLVM [1] building a nontemporal store must - // *always* point to a metadata value of the integer 1. - // - // [1]: https://llvm.org/docs/LangRef.html#store-instruction - let one = self.cx.const_i32(1); - let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); - llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); + // Make sure that the current target architectures supports "sane" non-temporal + // stores, i.e., non-temporal stores that are equivalent to regular stores except + // for performance. LLVM doesn't seem to care about this, and will happily treat + // `!nontemporal` stores as-if they were normal stores (for reordering optimizations + // etc) even on x86, despite later lowering them to MOVNT which do *not* behave like + // regular stores but require special fences. So we keep a list of architectures + // where `!nontemporal` is known to be truly just a hint, and use regular stores + // everywhere else. (In the future, we could alternatively ensure that an sfence + // gets emitted after a sequence of movnt before any kind of synchronizing + // operation. But it's not clear how to do that with LLVM.) + // For more context, see <https://github.com/rust-lang/rust/issues/114582> and + // <https://github.com/llvm/llvm-project/issues/64521>. 
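Stripped of the builder plumbing, the gating in the code that follows is just an allow-list check on the target architecture; a standalone sketch under illustrative names, with `arch` standing in for `sess.target.arch`:

    // Sketch of the allow-list check used below.
    fn nontemporal_is_just_a_hint(arch: &str) -> bool {
        const WELL_BEHAVED_NONTEMPORAL_ARCHS: &[&str] =
            &["aarch64", "arm", "riscv32", "riscv64"];
        WELL_BEHAVED_NONTEMPORAL_ARCHS.contains(&arch)
    }

    fn main() {
        assert!(nontemporal_is_just_a_hint("aarch64"));
        // On x86, the !nontemporal metadata is skipped and a plain store is emitted.
        assert!(!nontemporal_is_just_a_hint("x86_64"));
    }
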
+ const WELL_BEHAVED_NONTEMPORAL_ARCHS: &[&str] = + &["aarch64", "arm", "riscv32", "riscv64"]; + + let use_nontemporal = + WELL_BEHAVED_NONTEMPORAL_ARCHS.contains(&&*self.cx.tcx.sess.target.arch); + if use_nontemporal { + // According to LLVM [1] building a nontemporal store must + // *always* point to a metadata value of the integer 1. + // + // [1]: https://llvm.org/docs/LangRef.html#store-instruction + let one = self.cx.const_i32(1); + let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); + llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); + } } store } @@ -1144,6 +1106,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { (val, success) } } + fn atomic_rmw( &mut self, op: rustc_codegen_ssa::common::AtomicRmwBinOp, @@ -1295,15 +1258,9 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn apply_attrs_to_cleanup_callsite(&mut self, llret: &'ll Value) { - if llvm_util::get_version() < (17, 0, 2) { - // Work around https://github.com/llvm/llvm-project/issues/66984. - let noinline = llvm::AttributeKind::NoInline.create_attr(self.llcx); - attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[noinline]); - } else { - // Cleanup is always the cold path. - let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx); - attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]); - } + // Cleanup is always the cold path. + let cold_inline = llvm::AttributeKind::Cold.create_attr(self.llcx); + attributes::apply_to_callsite(llret, llvm::AttributePlace::Function, &[cold_inline]); } } @@ -1321,7 +1278,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { Builder { llbuilder, cx } } - pub fn llfn(&self) -> &'ll Value { + pub(crate) fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } @@ -1353,15 +1310,25 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } } - pub fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + pub(crate) fn set_unpredictable(&mut self, inst: &'ll Value) { + unsafe { + llvm::LLVMSetMetadata( + inst, + llvm::MD_unpredictable as c_uint, + llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0), + ); + } + } + + pub(crate) fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } } - pub fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + pub(crate) fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs) } } - pub fn insert_element( + pub(crate) fn insert_element( &mut self, vec: &'ll Value, elt: &'ll Value, @@ -1370,7 +1337,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { unsafe { llvm::LLVMBuildInsertElement(self.llbuilder, vec, elt, idx, UNNAMED) } } - pub fn shuffle_vector( + pub(crate) fn shuffle_vector( &mut self, v1: &'ll Value, v2: &'ll Value, @@ -1379,65 +1346,77 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, UNNAMED) } } - pub fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_fadd(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src) } } - pub fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_fmul(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { unsafe { 
llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src) } } - pub fn vector_reduce_fadd_reassoc(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_fadd_reassoc( + &mut self, + acc: &'ll Value, + src: &'ll Value, + ) -> &'ll Value { unsafe { let instr = llvm::LLVMRustBuildVectorReduceFAdd(self.llbuilder, acc, src); llvm::LLVMRustSetAllowReassoc(instr); instr } } - pub fn vector_reduce_fmul_reassoc(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_fmul_reassoc( + &mut self, + acc: &'ll Value, + src: &'ll Value, + ) -> &'ll Value { unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMul(self.llbuilder, acc, src); llvm::LLVMRustSetAllowReassoc(instr); instr } } - pub fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src) } } - pub fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src) } } - pub fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src) } } - pub fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src) } } - pub fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src) } } - pub fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false) } } - pub fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { + pub(crate) fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false) } } - pub fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + pub(crate) fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed) } } - pub fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { + pub(crate) fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { unsafe { llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed) } } - pub fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { + pub(crate) fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { llvm::LLVMAddClause(landing_pad, clause); } } - pub fn catch_ret(&mut self, funclet: &Funclet<'ll>, unwind: &'ll BasicBlock) -> &'ll Value { + pub(crate) fn catch_ret( + &mut self, + funclet: &Funclet<'ll>, + unwind: &'ll BasicBlock, + ) -> &'ll Value { let ret = unsafe { llvm::LLVMBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) }; ret.expect("LLVM does not have support for catchret") } @@ -1483,7 +1462,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { Cow::Owned(casted_args) } - pub fn va_arg(&mut self, list: &'ll 
Value, ty: &'ll Type) -> &'ll Value { + pub(crate) fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) } } @@ -1723,8 +1702,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { ) { debug!("mcdc_parameters() with args ({:?}, {:?}, {:?})", fn_name, hash, bitmap_bytes); - assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later"); - let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCParametersIntrinsic(self.cx().llmod) }; let llty = self.cx.type_func( &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32()], @@ -1758,7 +1735,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { "mcdc_tvbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})", fn_name, hash, bitmap_bytes, bitmap_index, mcdc_temp ); - assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later"); let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCTVBitmapUpdateIntrinsic(self.cx().llmod) }; @@ -1800,7 +1776,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { "mcdc_condbitmap_update() with args ({:?}, {:?}, {:?}, {:?}, {:?})", fn_name, hash, cond_loc, mcdc_temp, bool_value ); - assert!(llvm_util::get_version() >= (18, 0, 0), "MCDC intrinsics require LLVM 18 or later"); let llfn = unsafe { llvm::LLVMRustGetInstrProfMCDCCondBitmapIntrinsic(self.cx().llmod) }; let llty = self.cx.type_func( &[ diff --git a/compiler/rustc_codegen_llvm/src/callee.rs b/compiler/rustc_codegen_llvm/src/callee.rs index 659c6ae0d86..206a7069792 100644 --- a/compiler/rustc_codegen_llvm/src/callee.rs +++ b/compiler/rustc_codegen_llvm/src/callee.rs @@ -4,24 +4,18 @@ //! and methods are represented as just a fn ptr and not a full //! closure. -use crate::attributes; -use crate::common; -use crate::context::CodegenCx; -use crate::llvm; -use crate::value::Value; - +use rustc_codegen_ssa::common; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; use rustc_middle::ty::{self, Instance, TypeVisitableExt}; use tracing::debug; +use crate::context::CodegenCx; +use crate::llvm; +use crate::value::Value; + /// Codegens a reference to a fn/method item, monomorphizing and /// inlining as it goes. -/// -/// # Parameters -/// -/// - `cx`: the crate context -/// - `instance`: the instance to be instantiated -pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value { +pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value { let tcx = cx.tcx(); debug!("get_fn(instance={:?})", instance); @@ -48,7 +42,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> } else { let instance_def_id = instance.def_id(); let llfn = if tcx.sess.target.arch == "x86" - && let Some(dllimport) = common::get_dllimport(tcx, instance_def_id, sym) + && let Some(dllimport) = crate::common::get_dllimport(tcx, instance_def_id, sym) { // Fix for https://github.com/rust-lang/rust/issues/104453 // On x86 Windows, LLVM uses 'L' as the prefix for any private @@ -79,8 +73,6 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> }; debug!("get_fn: not casting pointer!"); - attributes::from_fn_attrs(cx, llfn, instance); - // Apply an appropriate linkage/visibility value to our item that we // just declared. 
// @@ -109,62 +101,42 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> let is_generic = instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some(); - if is_generic { - // This is a monomorphization. Its expected visibility depends - // on whether we are in share-generics mode. - - if cx.tcx.sess.opts.share_generics() { - // We are in share_generics mode. - + let is_hidden = if is_generic { + // This is a monomorphization of a generic function. + if !cx.tcx.sess.opts.share_generics() { + // When not sharing generics, all instances are in the same + // crate and have hidden visibility. + true + } else { if let Some(instance_def_id) = instance_def_id.as_local() { - // This is a definition from the current crate. If the - // definition is unreachable for downstream crates or - // the current crate does not re-export generics, the - // definition of the instance will have been declared - // as `hidden`. - if cx.tcx.is_unreachable_local_definition(instance_def_id) + // This is a monomorphization of a generic function + // defined in the current crate. It is hidden if: + // - the definition is unreachable for downstream + // crates, or + // - the current crate does not re-export generics + // (because the crate is a C library or executable) + cx.tcx.is_unreachable_local_definition(instance_def_id) || !cx.tcx.local_crate_exports_generics() - { - llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); - } } else { // This is a monomorphization of a generic function - // defined in an upstream crate. - if instance.upstream_monomorphization(tcx).is_some() { - // This is instantiated in another crate. It cannot - // be `hidden`. - } else { - // This is a local instantiation of an upstream definition. - // If the current crate does not re-export it - // (because it is a C library or an executable), it - // will have been declared `hidden`. - if !cx.tcx.local_crate_exports_generics() { - llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); - } - } + // defined in an upstream crate. It is hidden if: + // - it is instantiated in this crate, and + // - the current crate does not re-export generics + instance.upstream_monomorphization(tcx).is_none() + && !cx.tcx.local_crate_exports_generics() } - } else { - // When not sharing generics, all instances are in the same - // crate and have hidden visibility - llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); } } else { - // This is a non-generic function - if cx.tcx.is_codegened_item(instance_def_id) { - // This is a function that is instantiated in the local crate - - if instance_def_id.is_local() { - // This is function that is defined in the local crate. - // If it is not reachable, it is hidden. - if !cx.tcx.is_reachable_non_generic(instance_def_id) { - llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); - } - } else { - // This is a function from an upstream crate that has - // been instantiated here. These are always hidden. - llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden); - } - } + // This is a non-generic function. 
It is hidden if:
+ // - it is instantiated in the local crate, and
+ // - it is defined in an upstream crate (non-local), or
+ // - it is not reachable
+ cx.tcx.is_codegened_item(instance_def_id)
+ && (!instance_def_id.is_local()
+ || !cx.tcx.is_reachable_non_generic(instance_def_id))
+ };
+ if is_hidden {
+ llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
// MinGW: For backward compatibility we rely on the linker to decide whether it
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index fe64649cf70..508c2d1a820 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -1,11 +1,6 @@
//! Code that is useful in various codegen modules.
-use crate::consts::const_alloc_to_llvm;
-pub use crate::context::CodegenCx;
-use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
-use crate::type_::Type;
-use crate::value::Value;
-
+use libc::{c_char, c_uint};
use rustc_ast::Mutability;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
@@ -13,14 +8,16 @@ use rustc_hir::def_id::DefId;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
use rustc_middle::ty::TyCtxt;
-use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
+use rustc_session::cstore::DllImport;
use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
-use rustc_target::spec::Target;
-
-use libc::{c_char, c_uint};
-use std::fmt::Write;
use tracing::debug;
+use crate::consts::const_alloc_to_llvm;
+pub(crate) use crate::context::CodegenCx;
+use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
+use crate::type_::Type;
+use crate::value::Value;
+
/*
* A note on nomenclature of linking: "extern", "foreign", and "upcall".
*
@@ -61,21 +58,21 @@ use tracing::debug;
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
-pub struct Funclet<'ll> { +pub(crate) struct Funclet<'ll> { cleanuppad: &'ll Value, operand: OperandBundleDef<'ll>, } impl<'ll> Funclet<'ll> { - pub fn new(cleanuppad: &'ll Value) -> Self { + pub(crate) fn new(cleanuppad: &'ll Value) -> Self { Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) } } - pub fn cleanuppad(&self) -> &'ll Value { + pub(crate) fn cleanuppad(&self) -> &'ll Value { self.cleanuppad } - pub fn bundle(&self) -> &OperandBundleDef<'ll> { + pub(crate) fn bundle(&self) -> &OperandBundleDef<'ll> { &self.operand } } @@ -95,21 +92,16 @@ impl<'ll> BackendTypes for CodegenCx<'ll, '_> { } impl<'ll> CodegenCx<'ll, '_> { - pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + pub(crate) fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { let len = u64::try_from(elts.len()).expect("LLVMConstArray2 elements len overflow"); unsafe { llvm::LLVMConstArray2(ty, elts.as_ptr(), len) } } - pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { - let len = c_uint::try_from(elts.len()).expect("LLVMConstVector elements len overflow"); - unsafe { llvm::LLVMConstVector(elts.as_ptr(), len) } - } - - pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { + pub(crate) fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { bytes_in_context(self.llcx, bytes) } - pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { + pub(crate) fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { unsafe { let idx = c_uint::try_from(idx).expect("LLVMGetAggregateElement index overflow"); let r = llvm::LLVMGetAggregateElement(v, idx).unwrap(); @@ -121,7 +113,7 @@ impl<'ll> CodegenCx<'ll, '_> { } } -impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstNull(t) } } @@ -134,25 +126,14 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMGetPoison(t) } } - fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value { - unsafe { llvm::LLVMConstInt(t, i as u64, True) } - } - - fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value { - unsafe { llvm::LLVMConstInt(t, i, False) } - } - - fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value { - unsafe { - let words = [u as u64, (u >> 64) as u64]; - llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) - } - } - fn const_bool(&self, val: bool) -> &'ll Value { self.const_uint(self.type_i1(), val as u64) } + fn const_i8(&self, i: i8) -> &'ll Value { + self.const_int(self.type_i8(), i as i64) + } + fn const_i16(&self, i: i16) -> &'ll Value { self.const_int(self.type_i16(), i as i64) } @@ -161,8 +142,12 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_int(self.type_i32(), i as i64) } - fn const_i8(&self, i: i8) -> &'ll Value { - self.const_int(self.type_i8(), i as i64) + fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value { + unsafe { llvm::LLVMConstInt(t, i as u64, True) } + } + + fn const_u8(&self, i: u8) -> &'ll Value { + self.const_uint(self.type_i8(), i as u64) } fn const_u32(&self, i: u32) -> &'ll Value { @@ -187,8 +172,15 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { self.const_uint(self.isize_ty, i) } - fn const_u8(&self, i: u8) -> &'ll Value { - self.const_uint(self.type_i8(), i as u64) + fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value { + unsafe { llvm::LLVMConstInt(t, i, False) } + } + + fn const_uint_big(&self, 
t: &'ll Type, u: u128) -> &'ll Value { + unsafe { + let words = [u as u64, (u >> 64) as u64]; + llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr()) + } } fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value { @@ -224,6 +216,11 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { struct_in_context(self.llcx, elts, packed) } + fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + let len = c_uint::try_from(elts.len()).expect("LLVMConstVector elements len overflow"); + unsafe { llvm::LLVMConstVector(elts.as_ptr(), len) } + } + fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> { try_as_const_integral(v).and_then(|v| unsafe { let mut i = 0u64; @@ -342,18 +339,18 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> { } /// Get the [LLVM type][Type] of a [`Value`]. -pub fn val_ty(v: &Value) -> &Type { +pub(crate) fn val_ty(v: &Value) -> &Type { unsafe { llvm::LLVMTypeOf(v) } } -pub fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { +pub(crate) fn bytes_in_context<'ll>(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { unsafe { let ptr = bytes.as_ptr() as *const c_char; llvm::LLVMConstStringInContext2(llcx, ptr, bytes.len(), True) } } -pub fn struct_in_context<'ll>( +fn struct_in_context<'ll>( llcx: &'ll llvm::Context, elts: &[&'ll Value], packed: bool, @@ -379,64 +376,3 @@ pub(crate) fn get_dllimport<'tcx>( tcx.native_library(id) .and_then(|lib| lib.dll_imports.iter().find(|di| di.name.as_str() == name)) } - -pub(crate) fn is_mingw_gnu_toolchain(target: &Target) -> bool { - target.vendor == "pc" && target.os == "windows" && target.env == "gnu" && target.abi.is_empty() -} - -pub(crate) fn i686_decorated_name( - dll_import: &DllImport, - mingw: bool, - disable_name_mangling: bool, -) -> String { - let name = dll_import.name.as_str(); - - let (add_prefix, add_suffix) = match dll_import.import_name_type { - Some(PeImportNameType::NoPrefix) => (false, true), - Some(PeImportNameType::Undecorated) => (false, false), - _ => (true, true), - }; - - // Worst case: +1 for disable name mangling, +1 for prefix, +4 for suffix (@@__). - let mut decorated_name = String::with_capacity(name.len() + 6); - - if disable_name_mangling { - // LLVM uses a binary 1 ('\x01') prefix to a name to indicate that mangling needs to be disabled. - decorated_name.push('\x01'); - } - - let prefix = if add_prefix && dll_import.is_fn { - match dll_import.calling_convention { - DllCallingConvention::C | DllCallingConvention::Vectorcall(_) => None, - DllCallingConvention::Stdcall(_) => (!mingw - || dll_import.import_name_type == Some(PeImportNameType::Decorated)) - .then_some('_'), - DllCallingConvention::Fastcall(_) => Some('@'), - } - } else if !dll_import.is_fn && !mingw { - // For static variables, prefix with '_' on MSVC. 
- Some('_') - } else { - None - }; - if let Some(prefix) = prefix { - decorated_name.push(prefix); - } - - decorated_name.push_str(name); - - if add_suffix && dll_import.is_fn { - match dll_import.calling_convention { - DllCallingConvention::C => {} - DllCallingConvention::Stdcall(arg_list_size) - | DllCallingConvention::Fastcall(arg_list_size) => { - write!(&mut decorated_name, "@{arg_list_size}").unwrap(); - } - DllCallingConvention::Vectorcall(arg_list_size) => { - write!(&mut decorated_name, "@@{arg_list_size}").unwrap(); - } - } - } - - decorated_name -} diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 0e50e32913c..33d3b5d4474 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -1,13 +1,6 @@ -use crate::base; -use crate::common::{self, CodegenCx}; -use crate::debuginfo; -use crate::errors::{ - InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined, -}; -use crate::llvm::{self, True}; -use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; -use crate::value::Value; +use std::ops::Range; + +use rustc_codegen_ssa::common; use rustc_codegen_ssa::traits::*; use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; @@ -24,10 +17,19 @@ use rustc_session::config::Lto; use rustc_target::abi::{ Align, AlignFromBytesError, HasDataLayout, Primitive, Scalar, Size, WrappingRange, }; -use std::ops::Range; use tracing::{debug, instrument, trace}; -pub fn const_alloc_to_llvm<'ll>( +use crate::common::CodegenCx; +use crate::errors::{ + InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined, +}; +use crate::llvm::{self, True}; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; +use crate::{base, debuginfo}; + +pub(crate) fn const_alloc_to_llvm<'ll>( cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>, is_static: bool, @@ -71,8 +73,8 @@ pub fn const_alloc_to_llvm<'ll>( // Generating partially-uninit consts is limited to small numbers of chunks, // to avoid the cost of generating large complex const expressions. - // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element, - // and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`. + // For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element, and + // would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`. let max = cx.sess().opts.unstable_opts.uninit_const_chunk_threshold; let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max; @@ -194,7 +196,7 @@ fn check_and_apply_linkage<'ll, 'tcx>( g2 } } else if cx.tcx.sess.target.arch == "x86" - && let Some(dllimport) = common::get_dllimport(cx.tcx, def_id, sym) + && let Some(dllimport) = crate::common::get_dllimport(cx.tcx, def_id, sym) { cx.declare_global( &common::i686_decorated_name( @@ -247,8 +249,8 @@ impl<'ll> CodegenCx<'ll, '_> { trace!(?instance); let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() }; - // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out - // the llvm type from the actual evaluated initializer. + // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure + // out the llvm type from the actual evaluated initializer. 
let llty = if nested { self.type_i8() } else { @@ -260,7 +262,7 @@ impl<'ll> CodegenCx<'ll, '_> { } #[instrument(level = "debug", skip(self, llty))] - pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value { + fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value { let instance = Instance::mono(self.tcx, def_id); if let Some(&g) = self.instances.borrow().get(&instance) { trace!("used cached value"); @@ -318,15 +320,16 @@ impl<'ll> CodegenCx<'ll, '_> { } if !def_id.is_local() { - let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) && + let needs_dll_storage_attr = self.use_dll_storage_attrs + && !self.tcx.is_foreign_item(def_id) // Local definitions can never be imported, so we must not apply // the DLLImport annotation. - !dso_local && + && !dso_local // ThinLTO can't handle this workaround in all cases, so we don't // emit the attrs. Instead we make them unnecessary by disallowing // dynamic linking when linker plugin based LTO is enabled. - !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() && - self.tcx.sess.lto() != Lto::Thin; + && !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() + && self.tcx.sess.lto() != Lto::Thin; // If this assertion triggers, there's something wrong with commandline // argument validation. @@ -388,7 +391,7 @@ impl<'ll> CodegenCx<'ll, '_> { let val_llty = self.val_ty(v); let g = self.get_static_inner(def_id, val_llty); - let llty = self.val_ty(g); + let llty = llvm::LLVMGlobalGetValueType(g); let g = if val_llty == llty { g @@ -471,7 +474,7 @@ impl<'ll> CodegenCx<'ll, '_> { let val = llvm::LLVMMetadataAsValue(self.llcx, meta); llvm::LLVMAddNamedMetadataOperand( self.llmod, - c"wasm.custom_sections".as_ptr().cast(), + c"wasm.custom_sections".as_ptr(), val, ); } @@ -497,8 +500,8 @@ impl<'ll> CodegenCx<'ll, '_> { // `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage // on other targets, in particular MachO targets have *their* static constructor // lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However, - // that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`, - // so we don't need to take care of it here. + // that check happens when assigning the `CodegenFnAttrFlags` in + // `rustc_hir_analysis`, so we don't need to take care of it here. 
self.add_compiler_used_global(g); } if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) { @@ -511,7 +514,7 @@ impl<'ll> CodegenCx<'ll, '_> { } } -impl<'ll> StaticMethods for CodegenCx<'ll, '_> { +impl<'ll> StaticCodegenMethods for CodegenCx<'ll, '_> { fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value { if let Some(&gv) = self.const_globals.borrow().get(&cv) { unsafe { diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 49677dcf12f..1d5580fdd07 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -1,48 +1,44 @@ -use crate::attributes; -use crate::back::write::to_llvm_code_model; -use crate::callee::get_fn; -use crate::coverageinfo; -use crate::debuginfo; -use crate::debuginfo::metadata::apply_vcall_visibility_metadata; -use crate::llvm; -use crate::llvm_util; -use crate::type_::Type; -use crate::value::Value; +use std::borrow::Borrow; +use std::cell::{Cell, RefCell}; +use std::ffi::CStr; +use std::str; +use libc::c_uint; use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh}; use rustc_codegen_ssa::errors as ssa_errors; use rustc_codegen_ssa::traits::*; -use rustc_data_structures::base_n::ToBaseN; -use rustc_data_structures::base_n::ALPHANUMERIC_ONLY; +use rustc_data_structures::base_n::{ToBaseN, ALPHANUMERIC_ONLY}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::small_c_str::SmallCStr; use rustc_hir::def_id::DefId; +use rustc_middle::middle::codegen_fn_attrs::PatchableFunctionEntry; use rustc_middle::mir::mono::CodegenUnit; use rustc_middle::ty::layout::{ FnAbiError, FnAbiOfHelpers, FnAbiRequest, HasParamEnv, LayoutError, LayoutOfHelpers, - TyAndLayout, }; use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; use rustc_middle::{bug, span_bug}; -use rustc_session::config::{BranchProtection, CFGuard, CFProtection}; -use rustc_session::config::{CrateType, DebugInfo, PAuthKey, PacRet}; +use rustc_session::config::{ + BranchProtection, CFGuard, CFProtection, CrateType, DebugInfo, PAuthKey, PacRet, +}; use rustc_session::Session; use rustc_span::source_map::Spanned; use rustc_span::{Span, DUMMY_SP}; -use rustc_target::abi::{call::FnAbi, HasDataLayout, TargetDataLayout, VariantIdx}; -use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel}; +use rustc_target::abi::{HasDataLayout, TargetDataLayout, VariantIdx}; +use rustc_target::spec::{HasTargetSpec, RelocModel, SmallDataThresholdSupport, Target, TlsModel}; use smallvec::SmallVec; -use libc::c_uint; -use std::borrow::Borrow; -use std::cell::{Cell, RefCell}; -use std::ffi::CStr; -use std::str; +use crate::back::write::to_llvm_code_model; +use crate::callee::get_fn; +use crate::debuginfo::metadata::apply_vcall_visibility_metadata; +use crate::type_::Type; +use crate::value::Value; +use crate::{attributes, coverageinfo, debuginfo, llvm, llvm_util}; -/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM -/// `llvm::Context` so that several compilation units may be optimized in parallel. +/// There is one `CodegenCx` per codegen unit. Each one has its own LLVM +/// `llvm::Context` so that several codegen units may be processed in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. 
-pub struct CodegenCx<'ll, 'tcx> { +pub(crate) struct CodegenCx<'ll, 'tcx> { pub tcx: TyCtxt<'tcx>, pub use_dll_storage_attrs: bool, pub tls_model: llvm::ThreadLocalMode, @@ -113,7 +109,7 @@ fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode { } } -pub unsafe fn create_module<'ll>( +pub(crate) unsafe fn create_module<'ll>( tcx: TyCtxt<'_>, llcx: &'ll llvm::Context, mod_name: &str, @@ -124,14 +120,6 @@ pub unsafe fn create_module<'ll>( let mut target_data_layout = sess.target.data_layout.to_string(); let llvm_version = llvm_util::get_version(); - if llvm_version < (18, 0, 0) { - if sess.target.arch == "x86" || sess.target.arch == "x86_64" { - // LLVM 18 adjusts i128 to be 128-bit aligned on x86 variants. - // Earlier LLVMs leave this as default alignment, so remove it. - // See https://reviews.llvm.org/D86310 - target_data_layout = target_data_layout.replace("-i128:128", ""); - } - } if llvm_version < (19, 0, 0) { if sess.target.arch == "aarch64" || sess.target.arch.starts_with("arm64") { @@ -152,7 +140,7 @@ pub unsafe fn create_module<'ll>( // Ensure the data-layout values hardcoded remain the defaults. { - let tm = crate::back::write::create_informational_target_machine(tcx.sess); + let tm = crate::back::write::create_informational_target_machine(tcx.sess, false); unsafe { llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm); } @@ -210,7 +198,7 @@ pub unsafe fn create_module<'ll>( // If skipping the PLT is enabled, we need to add some module metadata // to ensure intrinsic calls don't use it. if !sess.needs_plt() { - let avoid_plt = c"RtLibUseGOT".as_ptr().cast(); + let avoid_plt = c"RtLibUseGOT".as_ptr(); unsafe { llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1); } @@ -218,7 +206,7 @@ pub unsafe fn create_module<'ll>( // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.) if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() { - let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr().cast(); + let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr(); unsafe { llvm::LLVMRustAddModuleFlagU32( llmod, @@ -229,9 +217,24 @@ pub unsafe fn create_module<'ll>( } } - // Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.) + // If we're normalizing integers with CFI, ensure LLVM generated functions do the same. + // See https://github.com/llvm/llvm-project/pull/104826 + if sess.is_sanitizer_cfi_normalize_integers_enabled() { + let cfi_normalize_integers = c"cfi-normalize-integers".as_ptr().cast(); + unsafe { + llvm::LLVMRustAddModuleFlagU32( + llmod, + llvm::LLVMModFlagBehavior::Override, + cfi_normalize_integers, + 1, + ); + } + } + + // Enable LTO unit splitting if specified or if CFI is enabled. (See + // https://reviews.llvm.org/D53891.) if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() { - let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr().cast(); + let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr(); unsafe { llvm::LLVMRustAddModuleFlagU32( llmod, @@ -244,10 +247,26 @@ pub unsafe fn create_module<'ll>( // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.) 
if sess.is_sanitizer_kcfi_enabled() { - let kcfi = c"kcfi".as_ptr().cast(); + let kcfi = c"kcfi".as_ptr(); unsafe { llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1); } + + // Add "kcfi-offset" module flag with -Z patchable-function-entry (See + // https://reviews.llvm.org/D141172). + let pfe = + PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry); + if pfe.prefix() > 0 { + let kcfi_offset = c"kcfi-offset".as_ptr().cast(); + unsafe { + llvm::LLVMRustAddModuleFlagU32( + llmod, + llvm::LLVMModFlagBehavior::Override, + kcfi_offset, + pfe.prefix().into(), + ); + } + } } // Control Flow Guard is currently only supported by the MSVC linker on Windows. @@ -283,26 +302,26 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, - c"branch-target-enforcement".as_ptr().cast(), + c"branch-target-enforcement".as_ptr(), bti.into(), ); llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, - c"sign-return-address".as_ptr().cast(), + c"sign-return-address".as_ptr(), pac_ret.is_some().into(), ); let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A }); llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, - c"sign-return-address-all".as_ptr().cast(), + c"sign-return-address-all".as_ptr(), pac_opts.leaf.into(), ); llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Min, - c"sign-return-address-with-bkey".as_ptr().cast(), + c"sign-return-address-with-bkey".as_ptr(), u32::from(pac_opts.key == PAuthKey::B), ); } @@ -320,7 +339,7 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Override, - c"cf-protection-branch".as_ptr().cast(), + c"cf-protection-branch".as_ptr(), 1, ); } @@ -330,7 +349,7 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Override, - c"cf-protection-return".as_ptr().cast(), + c"cf-protection-return".as_ptr(), 1, ); } @@ -341,7 +360,7 @@ pub unsafe fn create_module<'ll>( llvm::LLVMRustAddModuleFlagU32( llmod, llvm::LLVMModFlagBehavior::Error, - c"Virtual Function Elim".as_ptr().cast(), + c"Virtual Function Elim".as_ptr(), 1, ); } @@ -359,6 +378,24 @@ pub unsafe fn create_module<'ll>( } } + match (sess.opts.unstable_opts.small_data_threshold, sess.target.small_data_threshold_support()) + { + // Set up the small-data optimization limit for architectures that use + // an LLVM module flag to control this. + (Some(threshold), SmallDataThresholdSupport::LlvmModuleFlag(flag)) => { + let flag = SmallCStr::new(flag.as_ref()); + unsafe { + llvm::LLVMRustAddModuleFlagU32( + llmod, + llvm::LLVMModFlagBehavior::Error, + flag.as_c_str().as_ptr(), + threshold as u32, + ) + } + } + _ => (), + }; + // Insert `llvm.ident` metadata. 
// // On the wasm targets it will get hooked up to the "producer" sections @@ -534,7 +571,9 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { } #[inline] - pub fn coverage_context(&self) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> { + pub(crate) fn coverage_context( + &self, + ) -> Option<&coverageinfo::CrateCoverageContext<'ll, 'tcx>> { self.coverage_cx.as_ref() } @@ -550,7 +589,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { } } -impl<'ll, 'tcx> MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn vtables( &self, ) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>> @@ -778,10 +817,10 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.debugtrap", fn() -> void); ifn!("llvm.frameaddress", fn(t_i32) -> ptr); - ifn!("llvm.powi.f16", fn(t_f16, t_i32) -> t_f16); - ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); - ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); - ifn!("llvm.powi.f128", fn(t_f128, t_i32) -> t_f128); + ifn!("llvm.powi.f16.i32", fn(t_f16, t_i32) -> t_f16); + ifn!("llvm.powi.f32.i32", fn(t_f32, t_i32) -> t_f32); + ifn!("llvm.powi.f64.i32", fn(t_f64, t_i32) -> t_f64); + ifn!("llvm.powi.f128.i32", fn(t_f128, t_i32) -> t_f128); ifn!("llvm.pow.f16", fn(t_f16, t_f16) -> t_f16); ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); @@ -1003,8 +1042,10 @@ impl<'ll> CodegenCx<'ll, '_> { ifn!("llvm.is.constant.i64", fn(t_i64) -> i1); ifn!("llvm.is.constant.i128", fn(t_i128) -> i1); ifn!("llvm.is.constant.isize", fn(t_isize) -> i1); + ifn!("llvm.is.constant.f16", fn(t_f16) -> i1); ifn!("llvm.is.constant.f32", fn(t_f32) -> i1); ifn!("llvm.is.constant.f64", fn(t_f64) -> i1); + ifn!("llvm.is.constant.f128", fn(t_f128) -> i1); ifn!("llvm.is.constant.ptr", fn(ptr) -> i1); ifn!("llvm.expect.i1", fn(i1, i1) -> i1); @@ -1067,7 +1108,7 @@ impl<'ll> CodegenCx<'ll, '_> { impl CodegenCx<'_, '_> { /// Generates a new symbol name with the given prefix. This symbol name must /// only be used for definitions with `internal` or `private` linkage. - pub fn generate_local_symbol_name(&self, prefix: &str) -> String { + pub(crate) fn generate_local_symbol_name(&self, prefix: &str) -> String { let idx = self.local_gen_sym_counter.get(); self.local_gen_sym_counter.set(idx + 1); // Include a '.' character, so there can be no accidental conflicts with @@ -1108,8 +1149,6 @@ impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> { } impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> { - type LayoutOfResult = TyAndLayout<'tcx>; - #[inline] fn handle_layout_err(&self, err: LayoutError<'tcx>, span: Span, ty: Ty<'tcx>) -> ! { if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err { @@ -1121,8 +1160,6 @@ impl<'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'_, 'tcx> { } impl<'tcx> FnAbiOfHelpers<'tcx> for CodegenCx<'_, 'tcx> { - type FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>; - #[inline] fn handle_fn_abi_err( &self, diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs index 584d033d6bd..77821ca89bc 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs @@ -1,5 +1,5 @@ use rustc_middle::mir::coverage::{ - CodeRegion, ConditionInfo, CounterId, CovTerm, DecisionInfo, ExpressionId, MappingKind, + ConditionInfo, CounterId, CovTerm, DecisionInfo, ExpressionId, MappingKind, SourceRegion, }; /// Must match the layout of `LLVMRustCounterKind`. 
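// Illustrative sketch (not part of the diff above or below): the "Must match
// the layout of `LLVMRust...`" comments in this file rely on `#[repr(C)]` to
// keep these Rust types byte-compatible with their C++ counterparts in the
// LLVM wrapper, so values can be passed across the FFI boundary by value.
// A minimal sketch of that pattern, with made-up names standing in for the
// real ones:

/// Assumed C++ counterpart: `enum class ExampleTag : int { None, Decision, Branch };`
#[repr(C)]
#[derive(Clone, Copy, Debug)]
enum ExampleTag {
    None = 0,
    Decision = 1,
    Branch = 2,
}

/// Assumed C++ counterpart:
/// `struct ExampleParams { ExampleTag Tag; uint32_t BitmapIdx; uint16_t NumConditions; };`
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct ExampleParams {
    tag: ExampleTag,
    bitmap_idx: u32,
    num_conditions: u16,
}

// Keeping the Rust and C++ declarations in lockstep (same field order, same
// widths) is what those layout comments are asking for; any drift silently
// corrupts the values handed to the LLVM-side helpers.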
@@ -80,7 +80,7 @@ pub struct CounterExpression { /// Must match the layout of `LLVMRustCounterMappingRegionKind`. #[derive(Copy, Clone, Debug)] #[repr(C)] -pub enum RegionKind { +enum RegionKind { /// A CodeRegion associates some code with a counter CodeRegion = 0, @@ -110,31 +110,32 @@ pub enum RegionKind { MCDCBranchRegion = 6, } -pub mod mcdc { +mod mcdc { use rustc_middle::mir::coverage::{ConditionInfo, DecisionInfo}; /// Must match the layout of `LLVMRustMCDCDecisionParameters`. #[repr(C)] #[derive(Clone, Copy, Debug, Default)] - pub struct DecisionParameters { + pub(crate) struct DecisionParameters { bitmap_idx: u32, num_conditions: u16, } - // ConditionId in llvm is `unsigned int` at 18 while `int16_t` at [19](https://github.com/llvm/llvm-project/pull/81257) + // ConditionId in llvm is `unsigned int` at 18 while `int16_t` at + // [19](https://github.com/llvm/llvm-project/pull/81257). type LLVMConditionId = i16; /// Must match the layout of `LLVMRustMCDCBranchParameters`. #[repr(C)] #[derive(Clone, Copy, Debug, Default)] - pub struct BranchParameters { + pub(crate) struct BranchParameters { condition_id: LLVMConditionId, condition_ids: [LLVMConditionId; 2], } #[repr(C)] #[derive(Clone, Copy, Debug)] - pub enum ParameterTag { + enum ParameterTag { None = 0, Decision = 1, Branch = 2, @@ -142,24 +143,24 @@ pub mod mcdc { /// Same layout with `LLVMRustMCDCParameters` #[repr(C)] #[derive(Clone, Copy, Debug)] - pub struct Parameters { + pub(crate) struct Parameters { tag: ParameterTag, decision_params: DecisionParameters, branch_params: BranchParameters, } impl Parameters { - pub fn none() -> Self { + pub(crate) fn none() -> Self { Self { tag: ParameterTag::None, decision_params: Default::default(), branch_params: Default::default(), } } - pub fn decision(decision_params: DecisionParameters) -> Self { + pub(crate) fn decision(decision_params: DecisionParameters) -> Self { Self { tag: ParameterTag::Decision, decision_params, branch_params: Default::default() } } - pub fn branch(branch_params: BranchParameters) -> Self { + pub(crate) fn branch(branch_params: BranchParameters) -> Self { Self { tag: ParameterTag::Branch, decision_params: Default::default(), branch_params } } } @@ -236,9 +237,10 @@ impl CounterMappingRegion { pub(crate) fn from_mapping( mapping_kind: &MappingKind, local_file_id: u32, - code_region: &CodeRegion, + source_region: &SourceRegion, ) -> Self { - let &CodeRegion { file_name: _, start_line, start_col, end_line, end_col } = code_region; + let &SourceRegion { file_name: _, start_line, start_col, end_line, end_col } = + source_region; match *mapping_kind { MappingKind::Code(term) => Self::code_region( Counter::from_term(term), diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs index 14a94468587..5ed640b840e 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/map_data.rs @@ -1,20 +1,20 @@ -use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind}; - use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::FxIndexSet; use rustc_index::bit_set::BitSet; use rustc_middle::mir::coverage::{ - CodeRegion, CounterId, CovTerm, Expression, ExpressionId, FunctionCoverageInfo, Mapping, - MappingKind, Op, + CounterId, CovTerm, Expression, ExpressionId, FunctionCoverageInfo, Mapping, MappingKind, Op, + SourceRegion, }; use rustc_middle::ty::Instance; use rustc_span::Symbol; use tracing::{debug, instrument}; 
+use crate::coverageinfo::ffi::{Counter, CounterExpression, ExprKind}; + /// Holds all of the coverage mapping data associated with a function instance, /// collected during traversal of `Coverage` statements in the function's MIR. #[derive(Debug)] -pub struct FunctionCoverageCollector<'tcx> { +pub(crate) struct FunctionCoverageCollector<'tcx> { /// Coverage info that was attached to this function by the instrumentor. function_coverage_info: &'tcx FunctionCoverageInfo, is_used: bool, @@ -32,7 +32,7 @@ pub struct FunctionCoverageCollector<'tcx> { impl<'tcx> FunctionCoverageCollector<'tcx> { /// Creates a new set of coverage data for a used (called) function. - pub fn new( + pub(crate) fn new( instance: Instance<'tcx>, function_coverage_info: &'tcx FunctionCoverageInfo, ) -> Self { @@ -40,7 +40,7 @@ impl<'tcx> FunctionCoverageCollector<'tcx> { } /// Creates a new set of coverage data for an unused (never called) function. - pub fn unused( + pub(crate) fn unused( instance: Instance<'tcx>, function_coverage_info: &'tcx FunctionCoverageInfo, ) -> Self { @@ -195,13 +195,13 @@ impl<'tcx> FunctionCoverage<'tcx> { /// Return the source hash, generated from the HIR node structure, and used to indicate whether /// or not the source code structure changed between different compilations. - pub fn source_hash(&self) -> u64 { + pub(crate) fn source_hash(&self) -> u64 { if self.is_used { self.function_coverage_info.function_source_hash } else { 0 } } /// Returns an iterator over all filenames used by this function's mappings. pub(crate) fn all_file_names(&self) -> impl Iterator<Item = Symbol> + Captures<'_> { - self.function_coverage_info.mappings.iter().map(|mapping| mapping.code_region.file_name) + self.function_coverage_info.mappings.iter().map(|mapping| mapping.source_region.file_name) } /// Convert this function's coverage expression data into a form that can be @@ -230,12 +230,12 @@ impl<'tcx> FunctionCoverage<'tcx> { /// that will be used by `mapgen` when preparing for FFI. 
pub(crate) fn counter_regions( &self, - ) -> impl Iterator<Item = (MappingKind, &CodeRegion)> + ExactSizeIterator { + ) -> impl Iterator<Item = (MappingKind, &SourceRegion)> + ExactSizeIterator { self.function_coverage_info.mappings.iter().map(move |mapping| { - let Mapping { kind, code_region } = mapping; + let Mapping { kind, source_region } = mapping; let kind = kind.map_terms(|term| if self.is_zero_term(term) { CovTerm::Zero } else { term }); - (kind, code_region) + (kind, source_region) }) } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs index d2c0f20c285..a9f65ee8a93 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs @@ -1,21 +1,19 @@ -use crate::common::CodegenCx; -use crate::coverageinfo; -use crate::coverageinfo::ffi::CounterMappingRegion; -use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector}; -use crate::llvm; - use itertools::Itertools as _; -use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods}; +use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, ConstCodegenMethods}; use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet}; use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_index::IndexVec; -use rustc_middle::bug; -use rustc_middle::mir; use rustc_middle::ty::{self, TyCtxt}; +use rustc_middle::{bug, mir}; use rustc_span::def_id::DefIdSet; use rustc_span::Symbol; use tracing::debug; +use crate::common::CodegenCx; +use crate::coverageinfo::ffi::CounterMappingRegion; +use crate::coverageinfo::map_data::{FunctionCoverage, FunctionCoverageCollector}; +use crate::{coverageinfo, llvm}; + /// Generates and exports the Coverage Map. /// /// Rust Coverage Map generation supports LLVM Coverage Mapping Format versions @@ -29,7 +27,7 @@ use tracing::debug; /// implementing this Rust version, and though the format documentation is very explicit and /// detailed, some undocumented details in Clang's implementation (that may or may not be important) /// were also replicated for Rust's Coverage Map. -pub fn finalize(cx: &CodegenCx<'_, '_>) { +pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) { let tcx = cx.tcx; // Ensure that LLVM is using a version of the coverage mapping format that @@ -424,7 +422,7 @@ fn prepare_usage_sets<'tcx>(tcx: TyCtxt<'tcx>) -> UsageSets<'tcx> { (instance.def_id(), body) }); - // Functions whose coverage statments were found inlined into other functions. + // Functions whose coverage statements were found inlined into other functions. let mut used_via_inlining = FxHashSet::default(); // Functions that were instrumented, but had all of their coverage statements // removed by later MIR transforms (e.g. UnreachablePropagation). 
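// Illustrative sketch (not part of the diff): in the `counter_regions` hunk
// above, every coverage term that is known to be zero is rewritten to
// `CovTerm::Zero` before the mappings are handed to the FFI layer, so unused
// counters and expressions all collapse onto the zero counter. A simplified,
// self-contained version of that rewrite, using a stand-in `CovTerm` and a
// caller-supplied `is_zero_term` predicate:
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum CovTerm {
    Zero,
    Counter(u32),
    Expression(u32),
}

fn normalize_terms(
    terms: impl IntoIterator<Item = CovTerm>,
    is_zero_term: impl Fn(CovTerm) -> bool,
) -> Vec<CovTerm> {
    terms
        .into_iter()
        .map(|term| if is_zero_term(term) { CovTerm::Zero } else { term })
        .collect()
}

// For example, if `Counter(1)` was never incremented, the mapping terms
// `[Counter(0), Counter(1), Expression(0)]` normalize to
// `[Counter(0), Zero, Expression(0)]` (assuming the expression is non-zero).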
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index 7b7f8c885bb..c9d2a1c9b88 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -1,14 +1,9 @@ -use crate::llvm; - -use crate::builder::Builder; -use crate::common::CodegenCx; -use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion}; -use crate::coverageinfo::map_data::FunctionCoverageCollector; +use std::cell::RefCell; use libc::c_uint; use rustc_codegen_ssa::traits::{ - BaseTypeMethods, BuilderMethods, ConstMethods, CoverageInfoBuilderMethods, MiscMethods, - StaticMethods, + BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods, CoverageInfoBuilderMethods, + MiscCodegenMethods, StaticCodegenMethods, }; use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_llvm::RustString; @@ -19,14 +14,18 @@ use rustc_middle::ty::Instance; use rustc_target::abi::{Align, Size}; use tracing::{debug, instrument}; -use std::cell::RefCell; +use crate::builder::Builder; +use crate::common::CodegenCx; +use crate::coverageinfo::ffi::{CounterExpression, CounterMappingRegion}; +use crate::coverageinfo::map_data::FunctionCoverageCollector; +use crate::llvm; pub(crate) mod ffi; pub(crate) mod map_data; -pub mod mapgen; +mod mapgen; /// A context object for maintaining all state needed by the coverageinfo module. -pub struct CrateCoverageContext<'ll, 'tcx> { +pub(crate) struct CrateCoverageContext<'ll, 'tcx> { /// Coverage data for each instrumented function identified by DefId. pub(crate) function_coverage_map: RefCell<FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>>>, @@ -35,7 +34,7 @@ pub struct CrateCoverageContext<'ll, 'tcx> { } impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> { - pub fn new() -> Self { + pub(crate) fn new() -> Self { Self { function_coverage_map: Default::default(), pgo_func_name_var_map: Default::default(), @@ -43,17 +42,16 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> { } } - pub fn take_function_coverage_map( + fn take_function_coverage_map( &self, ) -> FxIndexMap<Instance<'tcx>, FunctionCoverageCollector<'tcx>> { self.function_coverage_map.replace(FxIndexMap::default()) } - /// LLVM use a temp value to record evaluated mcdc test vector of each decision, which is called condition bitmap. - /// In order to handle nested decisions, several condition bitmaps can be - /// allocated for a function body. - /// These values are named `mcdc.addr.{i}` and are a 32-bit integers. - /// They respectively hold the condition bitmaps for decisions with a depth of `i`. + /// LLVM use a temp value to record evaluated mcdc test vector of each decision, which is + /// called condition bitmap. In order to handle nested decisions, several condition bitmaps can + /// be allocated for a function body. These values are named `mcdc.addr.{i}` and are a 32-bit + /// integers. They respectively hold the condition bitmaps for decisions with a depth of `i`. fn try_get_mcdc_condition_bitmap( &self, instance: &Instance<'tcx>, @@ -158,8 +156,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { ), CoverageKind::CounterIncrement { id } => { func_coverage.mark_counter_id_seen(id); - // We need to explicitly drop the `RefMut` before calling into `instrprof_increment`, - // as that needs an exclusive borrow. + // We need to explicitly drop the `RefMut` before calling into + // `instrprof_increment`, as that needs an exclusive borrow. 
drop(coverage_map); // The number of counters passed to `llvm.instrprof.increment` might diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs index 6a63eda4b99..c3087d8ec30 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs @@ -1,22 +1,21 @@ -use super::metadata::file_metadata; -use super::utils::DIB; use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext}; use rustc_codegen_ssa::traits::*; - -use crate::common::CodegenCx; -use crate::llvm; -use crate::llvm::debuginfo::{DILocation, DIScope}; +use rustc_index::bit_set::BitSet; +use rustc_index::Idx; use rustc_middle::mir::{Body, SourceScope}; use rustc_middle::ty::layout::FnAbiOf; use rustc_middle::ty::{self, Instance}; use rustc_session::config::DebugInfo; -use rustc_index::bit_set::BitSet; -use rustc_index::Idx; +use super::metadata::file_metadata; +use super::utils::DIB; +use crate::common::CodegenCx; +use crate::llvm; +use crate::llvm::debuginfo::{DILocation, DIScope}; /// Produces DIScope DIEs for each MIR Scope which has variables defined in it. // FIXME(eddyb) almost all of this should be in `rustc_codegen_ssa::mir::debuginfo`. -pub fn compute_mir_scopes<'ll, 'tcx>( +pub(crate) fn compute_mir_scopes<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>, mir: &Body<'tcx>, @@ -89,7 +88,7 @@ fn make_mir_scope<'ll, 'tcx>( let loc = cx.lookup_debug_loc(scope_data.span.lo()); let file_metadata = file_metadata(cx, &loc.file); - let parent_dbg_scope = match scope_data.inlined { + let dbg_scope = match scope_data.inlined { Some((callee, _)) => { // FIXME(eddyb) this would be `self.monomorphize(&callee)` // if this is moved to `rustc_codegen_ssa::mir::debuginfo`. @@ -103,17 +102,15 @@ fn make_mir_scope<'ll, 'tcx>( cx.dbg_scope_fn(callee, callee_fn_abi, None) }) } - None => parent_scope.dbg_scope, - }; - - let dbg_scope = unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlock( - DIB(cx), - parent_dbg_scope, - file_metadata, - loc.line, - loc.col, - ) + None => unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlock( + DIB(cx), + parent_scope.dbg_scope, + file_metadata, + loc.line, + loc.col, + ) + }, }; let inlined_at = scope_data.inlined.map(|(_, callsite_span)| { diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs index d82b1e1e721..f93d3e40b20 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs @@ -1,21 +1,22 @@ // .debug_gdb_scripts binary section. -use crate::llvm; - -use crate::builder::Builder; -use crate::common::CodegenCx; -use crate::value::Value; use rustc_ast::attr; use rustc_codegen_ssa::base::collect_debugger_visualizers_transitive; use rustc_codegen_ssa::traits::*; use rustc_hir::def_id::LOCAL_CRATE; -use rustc_middle::{bug, middle::debugger_visualizer::DebuggerVisualizerType}; +use rustc_middle::bug; +use rustc_middle::middle::debugger_visualizer::DebuggerVisualizerType; use rustc_session::config::{CrateType, DebugInfo}; use rustc_span::symbol::sym; +use crate::builder::Builder; +use crate::common::CodegenCx; +use crate::llvm; +use crate::value::Value; + /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. 
-pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { +pub(crate) fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) { if needs_gdb_debug_scripts_section(bx) { let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx); // Load just the first byte as that's all that's necessary to force @@ -29,12 +30,13 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, /// Allocates the global variable responsible for the .debug_gdb_scripts binary /// section. -pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll Value { +pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>( + cx: &CodegenCx<'ll, '_>, +) -> &'ll Value { let c_section_var_name = c"__rustc_debug_gdb_scripts_section__"; let section_var_name = c_section_var_name.to_str().unwrap(); - let section_var = - unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) }; + let section_var = unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr()) }; section_var.unwrap_or_else(|| { let mut section_contents = Vec::new(); @@ -42,7 +44,8 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, ' // Add the pretty printers for the standard library first. section_contents.extend_from_slice(b"\x01gdb_load_rust_pretty_printers.py\0"); - // Next, add the pretty printers that were specified via the `#[debugger_visualizer]` attribute. + // Next, add the pretty printers that were specified via the `#[debugger_visualizer]` + // attribute. let visualizers = collect_debugger_visualizers_transitive( cx.tcx, DebuggerVisualizerType::GdbPrettyPrinter, @@ -69,7 +72,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, ' let section_var = cx .define_global(section_var_name, llvm_type) .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name)); - llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr().cast()); + llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr()); llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global); @@ -82,7 +85,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global<'ll>(cx: &CodegenCx<'ll, ' }) } -pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { +pub(crate) fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool { let omit_gdb_pretty_printer_section = attr::contains_name(cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section); diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 364c35f3107..6c84a40defb 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -1,32 +1,14 @@ -use self::type_map::DINodeCreationResult; -use self::type_map::Stub; -use self::type_map::UniqueTypeId; - -use super::namespace::mangled_name_of_instance; -use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_name}; -use super::utils::{ - create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB, -}; -use super::CodegenUnitDebugContext; - -use crate::abi; -use crate::common::CodegenCx; -use crate::debuginfo::metadata::type_map::build_type_with_children; -use 
crate::debuginfo::utils::fat_pointer_kind; -use crate::debuginfo::utils::FatPtrKind; -use crate::llvm; -use crate::llvm::debuginfo::{ - DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind, - DebugNameTableKind, -}; -use crate::value::Value; +use std::borrow::Cow; +use std::fmt::{self, Write}; +use std::hash::{Hash, Hasher}; +use std::path::{Path, PathBuf}; +use std::{iter, ptr}; -use rustc_codegen_ssa::debuginfo::type_names::cpp_like_debuginfo; -use rustc_codegen_ssa::debuginfo::type_names::VTableNameKind; +use libc::{c_char, c_longlong, c_uint}; +use rustc_codegen_ssa::debuginfo::type_names::{cpp_like_debuginfo, VTableNameKind}; use rustc_codegen_ssa::traits::*; use rustc_fs_util::path_to_c_string; -use rustc_hir::def::CtorKind; -use rustc_hir::def::DefKind; +use rustc_hir::def::{CtorKind, DefKind}; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; @@ -36,21 +18,29 @@ use rustc_middle::ty::{ }; use rustc_session::config::{self, DebugInfo, Lto}; use rustc_span::symbol::Symbol; -use rustc_span::{hygiene, FileName, DUMMY_SP}; -use rustc_span::{FileNameDisplayPreference, SourceFile}; +use rustc_span::{hygiene, FileName, FileNameDisplayPreference, SourceFile, DUMMY_SP}; use rustc_symbol_mangling::typeid_for_trait_ref; use rustc_target::abi::{Align, Size}; use rustc_target::spec::DebuginfoKind; use smallvec::smallvec; use tracing::{debug, instrument}; -use libc::{c_char, c_longlong, c_uint}; -use std::borrow::Cow; -use std::fmt::{self, Write}; -use std::hash::{Hash, Hasher}; -use std::iter; -use std::path::{Path, PathBuf}; -use std::ptr; +use self::type_map::{DINodeCreationResult, Stub, UniqueTypeId}; +use super::namespace::mangled_name_of_instance; +use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_name}; +use super::utils::{ + create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, DIB, +}; +use super::CodegenUnitDebugContext; +use crate::common::CodegenCx; +use crate::debuginfo::metadata::type_map::build_type_with_children; +use crate::debuginfo::utils::{fat_pointer_kind, FatPtrKind}; +use crate::llvm::debuginfo::{ + DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind, + DebugNameTableKind, +}; +use crate::value::Value; +use crate::{abi, llvm}; impl PartialEq for llvm::Metadata { fn eq(&self, other: &Self) -> bool { @@ -95,7 +85,7 @@ const NO_GENERICS: for<'ll> fn(&CodegenCx<'ll, '_>) -> SmallVec<&'ll DIType> = | // SmallVec is used quite a bit in this module, so create a shorthand. // The actual number of elements is not so important. -pub type SmallVec<T> = smallvec::SmallVec<[T; 16]>; +type SmallVec<T> = smallvec::SmallVec<[T; 16]>; mod enums; mod type_map; @@ -226,6 +216,9 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>( // need to make sure that we don't break existing debuginfo consumers // by doing that (at least not without a warning period). let layout_type = if ptr_type.is_box() { + // The assertion at the start of this function ensures we have a ZST + // allocator. We'll make debuginfo "skip" all ZST allocators, not just the + // default allocator. 
Ty::new_mut_ptr(cx.tcx, pointee_type) } else { ptr_type @@ -288,8 +281,7 @@ fn build_subroutine_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, unique_type_id: UniqueTypeId<'tcx>, ) -> DINodeCreationResult<'ll> { - // It's possible to create a self-referential - // type in Rust by using 'impl trait': + // It's possible to create a self-referential type in Rust by using 'impl trait': // // fn foo() -> impl Copy { foo } // @@ -435,7 +427,7 @@ fn build_slice_type_di_node<'ll, 'tcx>( /// /// This function will look up the debuginfo node in the TypeMap. If it can't find it, it /// will create the node by dispatching to the corresponding `build_*_di_node()` function. -pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { +pub(crate) fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { let unique_type_id = UniqueTypeId::for_ty(cx.tcx, t); if let Some(existing_di_node) = debug_context(cx).type_map.di_node_for_unique_id(unique_type_id) @@ -464,9 +456,9 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D if def.is_box() && args.get(1).map_or(true, |arg| cx.layout_of(arg.expect_ty()).is_1zst()) => { - build_pointer_or_reference_di_node(cx, t, t.boxed_ty(), unique_type_id) + build_pointer_or_reference_di_node(cx, t, t.expect_boxed_ty(), unique_type_id) } - ty::FnDef(..) | ty::FnPtr(_) => build_subroutine_type_di_node(cx, unique_type_id), + ty::FnDef(..) | ty::FnPtr(..) => build_subroutine_type_di_node(cx, unique_type_id), ty::Closure(..) => build_closure_env_di_node(cx, unique_type_id), ty::CoroutineClosure(..) => build_closure_env_di_node(cx, unique_type_id), ty::Coroutine(..) => enums::build_coroutine_di_node(cx, unique_type_id), @@ -541,7 +533,7 @@ fn hex_encode(data: &[u8]) -> String { hex_string } -pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll DIFile { +pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> &'ll DIFile { let cache_key = Some((source_file.stable_id, source_file.src_hash)); return debug_context(cx) .created_files @@ -581,14 +573,14 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> { // If the compiler's working directory (which also is the DW_AT_comp_dir of // the compilation unit) is a prefix of the path we are about to emit, then - // only emit the part relative to the working directory. - // Because of path remapping we sometimes see strange things here: `abs_path` - // might actually look like a relative path - // (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without - // taking the working directory into account, downstream tooling will - // interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`, - // which makes no sense. Usually in such cases the working directory will also - // be remapped to `<crate-name-and-version>` or some other prefix of the path + // only emit the part relative to the working directory. Because of path + // remapping we sometimes see strange things here: `abs_path` might + // actually look like a relative path (e.g. + // `<crate-name-and-version>/src/lib.rs`), so if we emit it without taking + // the working directory into account, downstream tooling will interpret it + // as `<working-directory>/<crate-name-and-version>/src/lib.rs`, which + // makes no sense. 
Usually in such cases the working directory will also be + // remapped to `<crate-name-and-version>` or some other prefix of the path // we are remapping, so we end up with // `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`. // By moving the working directory portion into the `directory` part of the @@ -639,6 +631,9 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> }; let hash_value = hex_encode(source_file.src_hash.hash_bytes()); + let source = + cx.sess().opts.unstable_opts.embed_source.then_some(()).and(source_file.src.as_ref()); + unsafe { llvm::LLVMRustDIBuilderCreateFile( DIB(cx), @@ -649,12 +644,14 @@ pub fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFile) -> hash_kind, hash_value.as_ptr().cast(), hash_value.len(), + source.map_or(ptr::null(), |x| x.as_ptr().cast()), + source.map_or(0, |x| x.len()), ) } } } -pub fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { +fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { debug_context(cx).created_files.borrow_mut().entry(None).or_insert_with(|| unsafe { let file_name = "<unknown>"; let directory = ""; @@ -669,6 +666,8 @@ pub fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { llvm::ChecksumKind::None, hash_value.as_ptr().cast(), hash_value.len(), + ptr::null(), + 0, ) }) } @@ -869,12 +868,13 @@ fn build_param_type_di_node<'ll, 'tcx>( } } -pub fn build_compile_unit_di_node<'ll, 'tcx>( +pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>( tcx: TyCtxt<'tcx>, codegen_unit_name: &str, debug_context: &CodegenUnitDebugContext<'ll, 'tcx>, ) -> &'ll DIDescriptor { - use rustc_session::{config::RemapPathScopeComponents, RemapFileNameExt}; + use rustc_session::config::RemapPathScopeComponents; + use rustc_session::RemapFileNameExt; let mut name_in_debuginfo = tcx .sess .local_crate_source_file() @@ -952,6 +952,8 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( llvm::ChecksumKind::None, ptr::null(), 0, + ptr::null(), + 0, ); let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit( @@ -961,7 +963,7 @@ pub fn build_compile_unit_di_node<'ll, 'tcx>( producer.as_ptr().cast(), producer.len(), tcx.sess.opts.optimize != config::OptLevel::No, - c"".as_ptr().cast(), + c"".as_ptr(), 0, // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead // put the path supplied to `MCSplitDwarfFile` into the debug info of the final @@ -1328,7 +1330,11 @@ fn build_generic_type_param_di_nodes<'ll, 'tcx>( /// Creates debug information for the given global variable. /// /// Adds the created debuginfo nodes directly to the crate's IR. -pub fn build_global_var_di_node<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId, global: &'ll Value) { +pub(crate) fn build_global_var_di_node<'ll>( + cx: &CodegenCx<'ll, '_>, + def_id: DefId, + global: &'ll Value, +) { if cx.dbg_cx.is_none() { return; } @@ -1568,7 +1574,7 @@ pub(crate) fn apply_vcall_visibility_metadata<'ll, 'tcx>( /// given type. /// /// Adds the created metadata nodes directly to the crate's IR. -pub fn create_vtable_di_node<'ll, 'tcx>( +pub(crate) fn create_vtable_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>, poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>, @@ -1613,7 +1619,7 @@ pub fn create_vtable_di_node<'ll, 'tcx>( } /// Creates an "extension" of an existing `DIScope` into another file. 
-pub fn extend_scope_to_file<'ll>( +pub(crate) fn extend_scope_to_file<'ll>( cx: &CodegenCx<'ll, '_>, scope_metadata: &'ll DIScope, file: &SourceFile, @@ -1622,7 +1628,7 @@ pub fn extend_scope_to_file<'ll>( unsafe { llvm::LLVMRustDIBuilderCreateLexicalBlockFile(DIB(cx), scope_metadata, file_metadata) } } -pub fn tuple_field_name(field_index: usize) -> Cow<'static, str> { +fn tuple_field_name(field_index: usize) -> Cow<'static, str> { const TUPLE_FIELD_NAMES: [&'static str; 16] = [ "__0", "__1", "__2", "__3", "__4", "__5", "__6", "__7", "__8", "__9", "__10", "__11", "__12", "__13", "__14", "__15", diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs index cf7dddce84f..8a132f89aa3 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs @@ -1,41 +1,27 @@ use std::borrow::Cow; use libc::c_uint; -use rustc_codegen_ssa::{ - debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo}, - traits::ConstMethods, -}; - +use rustc_codegen_ssa::debuginfo::type_names::compute_debuginfo_type_name; +use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo}; +use rustc_codegen_ssa::traits::ConstCodegenMethods; use rustc_index::IndexVec; -use rustc_middle::{ - bug, - ty::{ - self, - layout::{LayoutOf, TyAndLayout}, - AdtDef, CoroutineArgs, CoroutineArgsExt, Ty, - }, -}; +use rustc_middle::bug; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_middle::ty::{self, AdtDef, CoroutineArgs, CoroutineArgsExt, Ty}; use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants}; use smallvec::smallvec; -use crate::{ - common::CodegenCx, - debuginfo::{ - metadata::{ - build_field_di_node, - enums::{tag_base_type, DiscrResult}, - file_metadata, size_and_align_of, type_di_node, - type_map::{self, Stub, UniqueTypeId}, - unknown_file_metadata, visibility_di_flags, DINodeCreationResult, SmallVec, - NO_GENERICS, NO_SCOPE_METADATA, UNKNOWN_LINE_NUMBER, - }, - utils::DIB, - }, - llvm::{ - self, - debuginfo::{DIFile, DIFlags, DIType}, - }, +use crate::common::CodegenCx; +use crate::debuginfo::metadata::enums::DiscrResult; +use crate::debuginfo::metadata::type_map::{self, Stub, UniqueTypeId}; +use crate::debuginfo::metadata::{ + build_field_di_node, file_metadata, size_and_align_of, type_di_node, unknown_file_metadata, + visibility_di_flags, DINodeCreationResult, SmallVec, NO_GENERICS, NO_SCOPE_METADATA, + UNKNOWN_LINE_NUMBER, }; +use crate::debuginfo::utils::DIB; +use crate::llvm::debuginfo::{DIFile, DIFlags, DIType}; +use crate::llvm::{self}; // The names of the associated constants in each variant wrapper struct. // These have to match up with the names being used in `intrinsic.natvis`. 
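// Illustrative sketch (not part of the diff): the comment above refers to the
// C++-like ("cpp_like") debuginfo encoding of Rust enums, used for debuggers
// that cannot represent variant parts natively. Conceptually, the synthesized
// debuginfo describes a shape roughly like the Rust types below: a union of
// per-variant wrapper structs, each carrying associated constants that record
// which tag value selects it, and it is those constant names that the NatVis
// visualizers key on. All names here are made up; the real nodes are built via
// the DIBuilder calls in this module, not declared as Rust types.
#[repr(C)]
union ExampleEnumRepr {
    variant0: ExampleVariant0,
    variant1: ExampleVariant1,
}

#[derive(Clone, Copy)]
struct ExampleVariant0 {
    tag: u32,
    // ...fields of the first variant...
}

#[derive(Clone, Copy)]
struct ExampleVariant1 {
    tag: u32,
    // ...fields of the second variant...
}

impl ExampleVariant0 {
    // Stand-in for the associated constants mentioned above; the real constant
    // names are defined in this module and mirrored in `intrinsic.natvis`.
    const EXAMPLE_DISCRIMINANT: u32 = 0;
}

impl ExampleVariant1 {
    const EXAMPLE_DISCRIMINANT: u32 = 1;
}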
@@ -204,7 +190,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( let enum_type_and_layout = cx.layout_of(enum_type); let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false); - assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout)); + assert!(!wants_c_like_enum_debuginfo(cx.tcx, enum_type_and_layout)); type_map::build_type_with_children( cx, @@ -279,7 +265,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( let coroutine_type_and_layout = cx.layout_of(coroutine_type); let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false); - assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout)); + assert!(!wants_c_like_enum_debuginfo(cx.tcx, coroutine_type_and_layout)); type_map::build_type_with_children( cx, @@ -395,7 +381,7 @@ fn build_union_fields_for_enum<'ll, 'tcx>( tag_field: usize, untagged_variant_index: Option<VariantIdx>, ) -> SmallVec<&'ll DIType> { - let tag_base_type = super::tag_base_type(cx, enum_type_and_layout); + let tag_base_type = tag_base_type(cx.tcx, enum_type_and_layout); let variant_names_type_di_node = build_variant_names_type_di_node( cx, @@ -690,7 +676,7 @@ fn build_union_fields_for_direct_tag_coroutine<'ll, 'tcx>( let variant_range = coroutine_args.variant_range(coroutine_def_id, cx.tcx); let variant_count = (variant_range.start.as_u32()..variant_range.end.as_u32()).len(); - let tag_base_type = tag_base_type(cx, coroutine_type_and_layout); + let tag_base_type = tag_base_type(cx.tcx, coroutine_type_and_layout); let variant_names_type_di_node = build_variant_names_type_di_node( cx, @@ -817,7 +803,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( assert_eq!( cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty), - cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout)) + cx.size_and_align_of(self::tag_base_type(cx.tcx, enum_type_and_layout)) ); // ... and a field for the tag. 
If the tag is 128 bits wide, this will actually diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs index 96be1900ab2..edaf73b74a2 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs @@ -1,45 +1,27 @@ -use rustc_codegen_ssa::debuginfo::{ - type_names::{compute_debuginfo_type_name, cpp_like_debuginfo}, - wants_c_like_enum_debuginfo, -}; +use std::borrow::Cow; + +use rustc_codegen_ssa::debuginfo::type_names::{compute_debuginfo_type_name, cpp_like_debuginfo}; +use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo}; use rustc_hir::def::CtorKind; use rustc_index::IndexSlice; -use rustc_middle::{ - bug, - mir::CoroutineLayout, - ty::{ - self, - layout::{IntegerExt, LayoutOf, PrimitiveExt, TyAndLayout}, - AdtDef, CoroutineArgs, CoroutineArgsExt, Ty, VariantDef, - }, -}; +use rustc_middle::bug; +use rustc_middle::mir::CoroutineLayout; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_middle::ty::{self, AdtDef, CoroutineArgs, CoroutineArgsExt, Ty, VariantDef}; use rustc_span::Symbol; -use rustc_target::abi::{ - FieldIdx, HasDataLayout, Integer, Primitive, TagEncoding, VariantIdx, Variants, -}; -use std::borrow::Cow; - -use crate::{ - common::CodegenCx, - debuginfo::{ - metadata::{ - build_field_di_node, build_generic_type_param_di_nodes, type_di_node, - type_map::{self, Stub}, - unknown_file_metadata, UNKNOWN_LINE_NUMBER, - }, - utils::{create_DIArray, get_namespace_for_item, DIB}, - }, - llvm::{ - self, - debuginfo::{DIFlags, DIType}, - }, -}; - -use super::{ - size_and_align_of, - type_map::{DINodeCreationResult, UniqueTypeId}, - SmallVec, +use rustc_target::abi::{FieldIdx, TagEncoding, VariantIdx, Variants}; + +use super::type_map::{DINodeCreationResult, UniqueTypeId}; +use super::{size_and_align_of, SmallVec}; +use crate::common::CodegenCx; +use crate::debuginfo::metadata::type_map::{self, Stub}; +use crate::debuginfo::metadata::{ + build_field_di_node, build_generic_type_param_di_nodes, type_di_node, unknown_file_metadata, + UNKNOWN_LINE_NUMBER, }; +use crate::debuginfo::utils::{create_DIArray, get_namespace_for_item, DIB}; +use crate::llvm::debuginfo::{DIFlags, DIType}; +use crate::llvm::{self}; mod cpp_like; mod native; @@ -55,7 +37,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( let enum_type_and_layout = cx.layout_of(enum_type); - if wants_c_like_enum_debuginfo(enum_type_and_layout) { + if wants_c_like_enum_debuginfo(cx.tcx, enum_type_and_layout) { return build_c_style_enum_di_node(cx, enum_adt_def, enum_type_and_layout); } @@ -90,7 +72,7 @@ fn build_c_style_enum_di_node<'ll, 'tcx>( di_node: build_enumeration_type_di_node( cx, &compute_debuginfo_type_name(cx.tcx, enum_type_and_layout.ty, false), - tag_base_type(cx, enum_type_and_layout), + tag_base_type(cx.tcx, enum_type_and_layout), enum_adt_def.discriminants(cx.tcx).map(|(variant_index, discr)| { let name = Cow::from(enum_adt_def.variant(variant_index).name.as_str()); (name, discr.val) @@ -101,48 +83,6 @@ fn build_c_style_enum_di_node<'ll, 'tcx>( } } -/// Extract the type with which we want to describe the tag of the given enum or coroutine. -fn tag_base_type<'ll, 'tcx>( - cx: &CodegenCx<'ll, 'tcx>, - enum_type_and_layout: TyAndLayout<'tcx>, -) -> Ty<'tcx> { - assert!(match enum_type_and_layout.ty.kind() { - ty::Coroutine(..) 
=> true, - ty::Adt(adt_def, _) => adt_def.is_enum(), - _ => false, - }); - - match enum_type_and_layout.layout.variants() { - // A single-variant enum has no discriminant. - Variants::Single { .. } => { - bug!("tag_base_type() called for enum without tag: {:?}", enum_type_and_layout) - } - - Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => { - // Niche tags are always normalized to unsized integers of the correct size. - match tag.primitive() { - Primitive::Int(t, _) => t, - Primitive::Float(f) => Integer::from_size(f.size()).unwrap(), - // FIXME(erikdesjardins): handle non-default addrspace ptr sizes - Primitive::Pointer(_) => { - // If the niche is the NULL value of a reference, then `discr_enum_ty` will be - // a RawPtr. CodeView doesn't know what to do with enums whose base type is a - // pointer so we fix this up to just be `usize`. - // DWARF might be able to deal with this but with an integer type we are on - // the safe side there too. - cx.data_layout().ptr_sized_integer() - } - } - .to_ty(cx.tcx, false) - } - - Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => { - // Direct tags preserve the sign. - tag.primitive().to_ty(cx.tcx) - } - } -} - /// Build a DW_TAG_enumeration_type debuginfo node, with the given base type and variants. /// This is a helper function and does not register anything in the type map by itself. /// @@ -317,7 +257,7 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>( /// ---> DW_TAG_structure_type (type of variant 3) /// /// ``` -pub fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>( +fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, variant_index: VariantIdx, coroutine_type_and_layout: TyAndLayout<'tcx>, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index 63a9ce2fdf9..0b3140cc91f 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -1,37 +1,25 @@ use std::borrow::Cow; -use crate::{ - common::CodegenCx, - debuginfo::{ - metadata::{ - enums::tag_base_type, - file_metadata, size_and_align_of, type_di_node, - type_map::{self, Stub, StubInfo, UniqueTypeId}, - unknown_file_metadata, visibility_di_flags, DINodeCreationResult, SmallVec, - NO_GENERICS, UNKNOWN_LINE_NUMBER, - }, - utils::{create_DIArray, get_namespace_for_item, DIB}, - }, - llvm::{ - self, - debuginfo::{DIFile, DIFlags, DIType}, - }, -}; use libc::c_uint; -use rustc_codegen_ssa::{ - debuginfo::{type_names::compute_debuginfo_type_name, wants_c_like_enum_debuginfo}, - traits::ConstMethods, -}; -use rustc_middle::{ - bug, - ty::{ - self, - layout::{LayoutOf, TyAndLayout}, - }, -}; +use rustc_codegen_ssa::debuginfo::type_names::compute_debuginfo_type_name; +use rustc_codegen_ssa::debuginfo::{tag_base_type, wants_c_like_enum_debuginfo}; +use rustc_codegen_ssa::traits::ConstCodegenMethods; +use rustc_middle::bug; +use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; +use rustc_middle::ty::{self}; use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants}; use smallvec::smallvec; +use crate::common::CodegenCx; +use crate::debuginfo::metadata::type_map::{self, Stub, StubInfo, UniqueTypeId}; +use crate::debuginfo::metadata::{ + file_metadata, size_and_align_of, type_di_node, unknown_file_metadata, visibility_di_flags, + DINodeCreationResult, SmallVec, NO_GENERICS, UNKNOWN_LINE_NUMBER, +}; +use 
crate::debuginfo::utils::{create_DIArray, get_namespace_for_item, DIB}; +use crate::llvm::debuginfo::{DIFile, DIFlags, DIType}; +use crate::llvm::{self}; + /// Build the debuginfo node for an enum type. The listing below shows how such a /// type looks like at the LLVM IR/DWARF level. It is a `DW_TAG_structure_type` /// with a single `DW_TAG_variant_part` that in turn contains a `DW_TAG_variant` @@ -65,7 +53,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>( let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did()); - assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout)); + assert!(!wants_c_like_enum_debuginfo(cx.tcx, enum_type_and_layout)); type_map::build_type_with_children( cx, @@ -142,7 +130,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>( let containing_scope = get_namespace_for_item(cx, coroutine_def_id); let coroutine_type_and_layout = cx.layout_of(coroutine_type); - assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout)); + assert!(!wants_c_like_enum_debuginfo(cx.tcx, coroutine_type_and_layout)); let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false); @@ -332,7 +320,7 @@ fn build_discr_member_di_node<'ll, 'tcx>( &Variants::Single { .. } => None, &Variants::Multiple { tag_field, .. } => { - let tag_base_type = tag_base_type(cx, enum_or_coroutine_type_and_layout); + let tag_base_type = tag_base_type(cx.tcx, enum_or_coroutine_type_and_layout); let (size, align) = cx.size_and_align_of(tag_base_type); unsafe { diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs index 17931911f87..6d21f4204e3 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs @@ -1,27 +1,18 @@ use std::cell::RefCell; -use rustc_data_structures::{ - fingerprint::Fingerprint, - fx::FxHashMap, - stable_hasher::{HashStable, StableHasher}, -}; +use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_macros::HashStable; -use rustc_middle::{ - bug, - ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}, -}; +use rustc_middle::bug; +use rustc_middle::ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}; use rustc_target::abi::{Align, Size, VariantIdx}; -use crate::{ - common::CodegenCx, - debuginfo::utils::{create_DIArray, debug_context, DIB}, - llvm::{ - self, - debuginfo::{DIFlags, DIScope, DIType}, - }, -}; - use super::{unknown_file_metadata, SmallVec, UNKNOWN_LINE_NUMBER}; +use crate::common::CodegenCx; +use crate::debuginfo::utils::{create_DIArray, debug_context, DIB}; +use crate::llvm::debuginfo::{DIFlags, DIScope, DIType}; +use crate::llvm::{self}; mod private { use rustc_macros::HashStable; @@ -31,7 +22,7 @@ mod private { // `UniqueTypeId` from being constructed directly, without asserting // the preconditions. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, HashStable)] - pub struct HiddenZst; + pub(crate) struct HiddenZst; } /// A unique identifier for anything that we create a debuginfo node for. 
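The `private::HiddenZst` field above exists so that `UniqueTypeId` values can only be produced by constructors that assert their preconditions; enum variant constructors are otherwise as visible as the enum itself. A standalone sketch of the trick, with illustrative names:

mod type_map {
    mod private {
        // Constructible only inside `type_map` and its submodules.
        pub(crate) struct HiddenZst;
    }

    pub(crate) enum UniqueId {
        Raw(u64, private::HiddenZst),
    }

    impl UniqueId {
        // The only way to obtain a `UniqueId`; the precondition lives here.
        pub(crate) fn for_raw(raw: u64) -> Self {
            assert!(raw != 0, "expected a non-zero id");
            UniqueId::Raw(raw, private::HiddenZst)
        }

        pub(crate) fn raw(&self) -> u64 {
            match self {
                UniqueId::Raw(raw, _) => *raw,
            }
        }
    }
}

fn main() {
    // Code outside `type_map` must use the checked constructor; writing
    // `type_map::UniqueId::Raw(0, ...)` here would not compile because
    // `type_map::private::HiddenZst` is not visible.
    assert_eq!(type_map::UniqueId::for_raw(42).raw(), 42);
}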
@@ -57,17 +48,17 @@ pub(super) enum UniqueTypeId<'tcx> { } impl<'tcx> UniqueTypeId<'tcx> { - pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self { + pub(crate) fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self { assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t)); UniqueTypeId::Ty(t, private::HiddenZst) } - pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self { + pub(crate) fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self { assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty)); UniqueTypeId::VariantPart(enum_ty, private::HiddenZst) } - pub fn for_enum_variant_struct_type( + pub(crate) fn for_enum_variant_struct_type( tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>, variant_idx: VariantIdx, @@ -76,7 +67,7 @@ impl<'tcx> UniqueTypeId<'tcx> { UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst) } - pub fn for_enum_variant_struct_type_wrapper( + pub(crate) fn for_enum_variant_struct_type_wrapper( tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>, variant_idx: VariantIdx, @@ -85,7 +76,7 @@ impl<'tcx> UniqueTypeId<'tcx> { UniqueTypeId::VariantStructTypeCppLikeWrapper(enum_ty, variant_idx, private::HiddenZst) } - pub fn for_vtable_ty( + pub(crate) fn for_vtable_ty( tcx: TyCtxt<'tcx>, self_type: Ty<'tcx>, implemented_trait: Option<PolyExistentialTraitRef<'tcx>>, @@ -102,7 +93,7 @@ impl<'tcx> UniqueTypeId<'tcx> { /// argument of the various `LLVMRustDIBuilderCreate*Type()` methods. /// /// Right now this takes the form of a hex-encoded opaque hash value. - pub fn generate_unique_id_string(self, tcx: TyCtxt<'tcx>) -> String { + fn generate_unique_id_string(self, tcx: TyCtxt<'tcx>) -> String { let mut hasher = StableHasher::new(); tcx.with_stable_hashing_context(|mut hcx| { hcx.while_hashing_spans(false, |hcx| self.hash_stable(hcx, &mut hasher)) @@ -110,7 +101,7 @@ impl<'tcx> UniqueTypeId<'tcx> { hasher.finish::<Fingerprint>().to_hex() } - pub fn expect_ty(self) -> Ty<'tcx> { + pub(crate) fn expect_ty(self) -> Ty<'tcx> { match self { UniqueTypeId::Ty(ty, _) => ty, _ => bug!("Expected `UniqueTypeId::Ty` but found `{:?}`", self), @@ -142,25 +133,25 @@ impl<'ll, 'tcx> TypeMap<'ll, 'tcx> { } } -pub struct DINodeCreationResult<'ll> { +pub(crate) struct DINodeCreationResult<'ll> { pub di_node: &'ll DIType, pub already_stored_in_typemap: bool, } impl<'ll> DINodeCreationResult<'ll> { - pub fn new(di_node: &'ll DIType, already_stored_in_typemap: bool) -> Self { + pub(crate) fn new(di_node: &'ll DIType, already_stored_in_typemap: bool) -> Self { DINodeCreationResult { di_node, already_stored_in_typemap } } } #[derive(Debug, Copy, Clone, Eq, PartialEq)] -pub enum Stub<'ll> { +pub(crate) enum Stub<'ll> { Struct, Union, VTableTy { vtable_holder: &'ll DIType }, } -pub struct StubInfo<'ll, 'tcx> { +pub(crate) struct StubInfo<'ll, 'tcx> { metadata: &'ll DIType, unique_type_id: UniqueTypeId<'tcx>, } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 3486ce4becb..920c9e06be4 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -1,33 +1,21 @@ #![doc = include_str!("doc.md")] -use rustc_codegen_ssa::mir::debuginfo::VariableKind::*; -use rustc_data_structures::unord::UnordMap; - -use self::metadata::{file_metadata, type_di_node}; -use self::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER}; -use self::namespace::mangled_name_of_instance; -use self::utils::{create_DIArray, 
is_node_local_to_unit, DIB}; - -use crate::abi::FnAbi; -use crate::builder::Builder; -use crate::common::CodegenCx; -use crate::llvm; -use crate::llvm::debuginfo::{ - DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType, - DIVariable, -}; -use crate::value::Value; +use std::cell::{OnceCell, RefCell}; +use std::ops::Range; +use std::{iter, ptr}; +use libc::c_uint; use rustc_codegen_ssa::debuginfo::type_names; +use rustc_codegen_ssa::mir::debuginfo::VariableKind::*; use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind}; use rustc_codegen_ssa::traits::*; use rustc_data_structures::sync::Lrc; +use rustc_data_structures::unord::UnordMap; use rustc_hir::def_id::{DefId, DefIdMap}; use rustc_index::IndexVec; use rustc_middle::mir; use rustc_middle::ty::layout::LayoutOf; -use rustc_middle::ty::GenericArgsRef; -use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeVisitableExt}; +use rustc_middle::ty::{self, GenericArgsRef, Instance, ParamEnv, Ty, TypeVisitableExt}; use rustc_session::config::{self, DebugInfo}; use rustc_session::Session; use rustc_span::symbol::Symbol; @@ -35,23 +23,30 @@ use rustc_span::{ BytePos, Pos, SourceFile, SourceFileAndLine, SourceFileHash, Span, StableSourceFileId, }; use rustc_target::abi::Size; - -use libc::c_uint; use smallvec::SmallVec; -use std::cell::OnceCell; -use std::cell::RefCell; -use std::iter; -use std::ops::Range; use tracing::debug; +use self::metadata::{file_metadata, type_di_node, UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER}; +use self::namespace::mangled_name_of_instance; +use self::utils::{create_DIArray, is_node_local_to_unit, DIB}; +use crate::abi::FnAbi; +use crate::builder::Builder; +use crate::common::CodegenCx; +use crate::llvm; +use crate::llvm::debuginfo::{ + DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType, + DIVariable, +}; +use crate::value::Value; + mod create_scope_map; -pub mod gdb; -pub mod metadata; +mod gdb; +pub(crate) mod metadata; mod namespace; mod utils; -pub use self::create_scope_map::compute_mir_scopes; -pub use self::metadata::build_global_var_di_node; +use self::create_scope_map::compute_mir_scopes; +pub(crate) use self::metadata::build_global_var_di_node; #[allow(non_upper_case_globals)] const DW_TAG_auto_variable: c_uint = 0x100; @@ -59,7 +54,7 @@ const DW_TAG_auto_variable: c_uint = 0x100; const DW_TAG_arg_variable: c_uint = 0x101; /// A context object for maintaining all state needed by the debuginfo module. 
-pub struct CodegenUnitDebugContext<'ll, 'tcx> { +pub(crate) struct CodegenUnitDebugContext<'ll, 'tcx> { llcontext: &'ll llvm::Context, llmod: &'ll llvm::Module, builder: &'ll mut DIBuilder<'ll>, @@ -79,7 +74,7 @@ impl Drop for CodegenUnitDebugContext<'_, '_> { } impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { - pub fn new(llmod: &'ll llvm::Module) -> Self { + pub(crate) fn new(llmod: &'ll llvm::Module) -> Self { debug!("CodegenUnitDebugContext::new"); let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) }; // DIBuilder inherits context from the module, so we'd better use the same one @@ -95,7 +90,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { } } - pub fn finalize(&self, sess: &Session) { + pub(crate) fn finalize(&self, sess: &Session) { unsafe { llvm::LLVMRustDIBuilderFinalize(self.builder); @@ -114,7 +109,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { llvm::LLVMRustAddModuleFlagU32( self.llmod, llvm::LLVMModFlagBehavior::Warning, - c"Dwarf Version".as_ptr().cast(), + c"Dwarf Version".as_ptr(), dwarf_version, ); } else { @@ -122,7 +117,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { llvm::LLVMRustAddModuleFlagU32( self.llmod, llvm::LLVMModFlagBehavior::Warning, - c"CodeView".as_ptr().cast(), + c"CodeView".as_ptr(), 1, ) } @@ -131,7 +126,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { llvm::LLVMRustAddModuleFlagU32( self.llmod, llvm::LLVMModFlagBehavior::Warning, - c"Debug Info Version".as_ptr().cast(), + c"Debug Info Version".as_ptr(), llvm::LLVMRustDebugMetadataVersion(), ); } @@ -139,7 +134,7 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { } /// Creates any deferred debug metadata nodes -pub fn finalize(cx: &CodegenCx<'_, '_>) { +pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) { if let Some(dbg_cx) = &cx.dbg_cx { debug!("finalize"); @@ -214,6 +209,12 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> { } } + fn clear_dbg_loc(&mut self) { + unsafe { + llvm::LLVMSetCurrentDebugLocation2(self.llbuilder, ptr::null()); + } + } + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { gdb::insert_reference_to_gdb_debug_scripts_section_global(self) } @@ -246,13 +247,13 @@ impl<'ll> DebugInfoBuilderMethods for Builder<'_, 'll, '_> { // FIXME(eddyb) rename this to better indicate it's a duplicate of // `rustc_span::Loc` rather than `DILocation`, perhaps by making // `lookup_char_pos` return the right information instead. -pub struct DebugLoc { +struct DebugLoc { /// Information about the original source file. - pub file: Lrc<SourceFile>, + file: Lrc<SourceFile>, /// The (1-based) line number. - pub line: u32, + line: u32, /// The (1-based) column number. - pub col: u32, + col: u32, } impl CodegenCx<'_, '_> { @@ -260,7 +261,7 @@ impl CodegenCx<'_, '_> { // FIXME(eddyb) rename this to better indicate it's a duplicate of // `lookup_char_pos` rather than `dbg_loc`, perhaps by making // `lookup_char_pos` return the right information instead. 
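The `.cast()` removals in this hunk are sound because `c"..."` literals are `&CStr`, and `CStr::as_ptr` already returns `*const c_char`. A tiny self-contained check of that:

use std::ffi::{c_char, CStr};

fn main() {
    let flag: &'static CStr = c"Dwarf Version";
    // No cast needed: this is already `*const c_char`.
    let ptr: *const c_char = flag.as_ptr();
    assert_eq!(unsafe { CStr::from_ptr(ptr) }.to_str(), Ok("Dwarf Version"));
}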
- pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc { + fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc { let (file, line, col) = match self.sess().source_map().lookup_line(pos) { Ok(SourceFileAndLine { sf: file, line }) => { let line_pos = file.lines()[line]; @@ -285,7 +286,7 @@ impl CodegenCx<'_, '_> { } } -impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn create_function_debug_context( &self, instance: Instance<'tcx>, @@ -575,7 +576,17 @@ impl<'ll, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> { inlined_at: Option<&'ll DILocation>, span: Span, ) -> &'ll DILocation { - let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo()); + // When emitting debugging information, DWARF (i.e. everything but MSVC) + // treats line 0 as a magic value meaning that the code could not be + // attributed to any line in the source. That's also exactly what dummy + // spans are. Make that equivalence here, rather than passing dummy spans + // to lookup_debug_loc, which will return line 1 for them. + let (line, col) = if span.is_dummy() && !self.sess().target.is_like_msvc { + (0, 0) + } else { + let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo()); + (line, col) + }; unsafe { llvm::LLVMRustDIBuilderCreateDebugLocation(line, col, scope, inlined_at) } } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs index fa61c7dde18..9674b1eb848 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs @@ -1,23 +1,22 @@ // Namespace Handling. -use super::utils::{debug_context, DIB}; use rustc_codegen_ssa::debuginfo::type_names; +use rustc_hir::def_id::DefId; use rustc_middle::ty::{self, Instance}; +use super::utils::{debug_context, DIB}; use crate::common::CodegenCx; use crate::llvm; use crate::llvm::debuginfo::DIScope; -use rustc_hir::def_id::DefId; -pub fn mangled_name_of_instance<'a, 'tcx>( +pub(crate) fn mangled_name_of_instance<'a, 'tcx>( cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>, ) -> ty::SymbolName<'tcx> { - let tcx = cx.tcx; - tcx.symbol_name(instance) + cx.tcx.symbol_name(instance) } -pub fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { +pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) { return scope; } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs index 9bd2ccceadf..321553a3df0 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/utils.rs @@ -1,18 +1,17 @@ // Utility Functions. 
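`lookup_debug_loc` above turns a byte position into the 1-based line and column that debuginfo expects, while dummy spans are mapped straight to line 0, which DWARF reserves for code with no source attribution. An illustrative reimplementation of just the lookup arithmetic (not the compiler's code):

fn line_and_col(line_starts: &[u32], pos: u32) -> (u32, u32) {
    // Index of the last line whose start offset is <= pos.
    let line = match line_starts.binary_search(&pos) {
        Ok(exact) => exact,
        Err(insertion_point) => insertion_point - 1,
    };
    // Debuginfo wants 1-based line and column numbers.
    (line as u32 + 1, pos - line_starts[line] + 1)
}

fn main() {
    // A hypothetical file whose lines start at byte offsets 0, 10 and 25.
    assert_eq!(line_and_col(&[0, 10, 25], 0), (1, 1));
    assert_eq!(line_and_col(&[0, 10, 25], 12), (2, 3));
}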
-use super::namespace::item_namespace; -use super::CodegenUnitDebugContext; - use rustc_hir::def_id::DefId; use rustc_middle::ty::layout::{HasParamEnv, LayoutOf}; use rustc_middle::ty::{self, Ty}; use tracing::trace; +use super::namespace::item_namespace; +use super::CodegenUnitDebugContext; use crate::common::CodegenCx; use crate::llvm; use crate::llvm::debuginfo::{DIArray, DIBuilder, DIDescriptor, DIScope}; -pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool { +pub(crate) fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool { // The is_local_to_unit flag indicates whether a function is local to the // current compilation unit (i.e., if it is *static* in the C-sense). The // *reachable* set should provide a good approximation of this, as it @@ -25,7 +24,7 @@ pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool { } #[allow(non_snake_case)] -pub fn create_DIArray<'ll>( +pub(crate) fn create_DIArray<'ll>( builder: &DIBuilder<'ll>, arr: &[Option<&'ll DIDescriptor>], ) -> &'ll DIArray { @@ -33,7 +32,7 @@ pub fn create_DIArray<'ll>( } #[inline] -pub fn debug_context<'a, 'll, 'tcx>( +pub(crate) fn debug_context<'a, 'll, 'tcx>( cx: &'a CodegenCx<'ll, 'tcx>, ) -> &'a CodegenUnitDebugContext<'ll, 'tcx> { cx.dbg_cx.as_ref().unwrap() @@ -41,11 +40,11 @@ pub fn debug_context<'a, 'll, 'tcx>( #[inline] #[allow(non_snake_case)] -pub fn DIB<'a, 'll>(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> { +pub(crate) fn DIB<'a, 'll>(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> { cx.dbg_cx.as_ref().unwrap().builder } -pub fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { +pub(crate) fn get_namespace_for_item<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { item_namespace(cx, cx.tcx.parent(def_id)) } @@ -63,7 +62,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, pointee_ty: Ty<'tcx>, ) -> Option<FatPtrKind> { - let pointee_tail_ty = cx.tcx.struct_tail_erasing_lifetimes(pointee_ty, cx.param_env()); + let pointee_tail_ty = cx.tcx.struct_tail_for_codegen(pointee_ty, cx.param_env()); let layout = cx.layout_of(pointee_tail_ty); trace!( "fat_pointer_kind: {:?} has layout {:?} (is_unsized? {})", diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index bf86d0e0569..b0b29ca1280 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -11,21 +11,21 @@ //! * Use define_* family of methods when you might be defining the Value. //! * When in doubt, define. -use crate::abi::{FnAbi, FnAbiLlvmExt}; -use crate::attributes; -use crate::context::CodegenCx; -use crate::llvm; -use crate::llvm::AttributePlace::Function; -use crate::type_::Type; -use crate::value::Value; use itertools::Itertools; -use rustc_codegen_ssa::traits::TypeMembershipMethods; +use rustc_codegen_ssa::traits::TypeMembershipCodegenMethods; use rustc_data_structures::fx::FxIndexSet; use rustc_middle::ty::{Instance, Ty}; use rustc_sanitizers::{cfi, kcfi}; use smallvec::SmallVec; use tracing::debug; +use crate::abi::{FnAbi, FnAbiLlvmExt}; +use crate::context::CodegenCx; +use crate::llvm::AttributePlace::Function; +use crate::type_::Type; +use crate::value::Value; +use crate::{attributes, llvm}; + /// Declare a function. 
/// /// If there’s a value with the same name already declared, the function will @@ -65,7 +65,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// /// If there’s a value with the same name already declared, the function will /// return its Value instead. - pub fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value { + pub(crate) fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value { debug!("declare_global(name={:?})", name); unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) } } @@ -77,7 +77,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. - pub fn declare_cfn( + pub(crate) fn declare_cfn( &self, name: &str, unnamed: llvm::UnnamedAddr, @@ -100,7 +100,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. - pub fn declare_entry_fn( + pub(crate) fn declare_entry_fn( &self, name: &str, callconv: llvm::CallConv, @@ -119,7 +119,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. - pub fn declare_fn( + pub(crate) fn declare_fn( &self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, @@ -137,7 +137,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { llvm::Visibility::Default, fn_abi.llvm_type(self), ); - fn_abi.apply_attrs_llfn(self, llfn); + fn_abi.apply_attrs_llfn(self, llfn, instance); if self.tcx.sess.is_sanitizer_cfi_enabled() { if let Some(instance) = instance { @@ -199,7 +199,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// return `None` if the name already has a definition associated with it. In that /// case an error should be reported to the user, because it usually happens due /// to user’s fault (e.g., misuse of `#[no_mangle]` or `#[export_name]` attributes). - pub fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> { + pub(crate) fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> { if self.get_defined_value(name).is_some() { None } else { @@ -210,19 +210,19 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// Declare a private global /// /// Use this function when you intend to define a global without a name. - pub fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { + pub(crate) fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) } } /// Gets declared value by name. - pub fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { + pub(crate) fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { debug!("get_declared_value(name={:?})", name); unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) } } /// Gets defined or externally defined (AvailableExternally linkage) value by /// name. 
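The `#[no_mangle]` / `#[export_name]` misuse mentioned in the `define_global` docs above is two items being forced onto one exported symbol, which codegen rejects with a "symbol ... is already defined" error instead of silently picking a definition. A compiling sketch of the adjacent, non-colliding case:

// Each item gets a distinct exported symbol. If both used the same name,
// codegen would reject the program with a "symbol `my_hook` is already
// defined" error, the situation the `define_global` / `get_defined_value`
// checks above guard against.
#[no_mangle]
pub extern "C" fn my_hook() {}

#[export_name = "my_hook_v2"]
pub extern "C" fn my_hook_v2() {}

fn main() {}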
- pub fn get_defined_value(&self, name: &str) -> Option<&'ll Value> { + pub(crate) fn get_defined_value(&self, name: &str) -> Option<&'ll Value> { self.get_declared_value(name).and_then(|val| { let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 }; if !declaration { Some(val) } else { None } diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs index 40ac2f9c8ba..bb481d2a308 100644 --- a/compiler/rustc_codegen_llvm/src/errors.rs +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -1,13 +1,13 @@ -use std::borrow::Cow; use std::ffi::CString; use std::path::Path; -use crate::fluent_generated as fluent; use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{Diag, DiagCtxtHandle, Diagnostic, EmissionGuarantee, Level}; use rustc_macros::{Diagnostic, Subdiagnostic}; use rustc_span::Span; +use crate::fluent_generated as fluent; + #[derive(Diagnostic)] #[diag(codegen_llvm_unknown_ctarget_feature_prefix)] #[note] @@ -40,13 +40,6 @@ pub(crate) enum PossibleFeature<'a> { } #[derive(Diagnostic)] -#[diag(codegen_llvm_error_creating_import_library)] -pub(crate) struct ErrorCreatingImportLibrary<'a> { - pub lib_name: &'a str, - pub error: String, -} - -#[derive(Diagnostic)] #[diag(codegen_llvm_symbol_already_defined)] pub(crate) struct SymbolAlreadyDefined<'a> { #[primary_span] @@ -71,28 +64,6 @@ pub(crate) struct InvalidMinimumAlignmentTooLarge { pub(crate) struct SanitizerMemtagRequiresMte; #[derive(Diagnostic)] -#[diag(codegen_llvm_error_writing_def_file)] -pub(crate) struct ErrorWritingDEFFile { - pub error: std::io::Error, -} - -#[derive(Diagnostic)] -#[diag(codegen_llvm_error_calling_dlltool)] -pub(crate) struct ErrorCallingDllTool<'a> { - pub dlltool_path: Cow<'a, str>, - pub error: std::io::Error, -} - -#[derive(Diagnostic)] -#[diag(codegen_llvm_dlltool_fail_import_library)] -pub(crate) struct DlltoolFailImportLibrary<'a> { - pub dlltool_path: Cow<'a, str>, - pub dlltool_args: String, - pub stdout: Cow<'a, str>, - pub stderr: Cow<'a, str>, -} - -#[derive(Diagnostic)] #[diag(codegen_llvm_dynamic_linking_with_lto)] #[note] pub(crate) struct DynamicLinkingWithLTO; @@ -236,13 +207,13 @@ pub(crate) struct CopyBitcode { #[derive(Diagnostic)] #[diag(codegen_llvm_unknown_debuginfo_compression)] -pub struct UnknownCompression { +pub(crate) struct UnknownCompression { pub algorithm: &'static str, } #[derive(Diagnostic)] #[diag(codegen_llvm_mismatch_data_layout)] -pub struct MismatchedDataLayout<'a> { +pub(crate) struct MismatchedDataLayout<'a> { pub rustc_target: &'a str, pub rustc_layout: &'a str, pub llvm_target: &'a str, diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 68c3d47e826..307fb9c35b7 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1,17 +1,11 @@ -use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode}; -use crate::builder::Builder; -use crate::context::CodegenCx; -use crate::llvm; -use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; -use crate::va_arg::emit_va_arg; -use crate::value::Value; +use std::assert_matches::assert_matches; +use std::cmp::Ordering; use rustc_codegen_ssa::base::{compare_simd_types, wants_msvc_seh, wants_wasm_eh}; use rustc_codegen_ssa::common::{IntPredicate, TypeKind}; use rustc_codegen_ssa::errors::{ExpectedPointerMutability, InvalidMonomorphization}; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; -use rustc_codegen_ssa::mir::place::PlaceRef; +use 
rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; use rustc_codegen_ssa::traits::*; use rustc_hir as hir; use rustc_middle::mir::BinOp; @@ -23,7 +17,14 @@ use rustc_target::abi::{self, Align, Float, HasDataLayout, Primitive, Size}; use rustc_target::spec::{HasTargetSpec, PanicStrategy}; use tracing::debug; -use std::cmp::Ordering; +use crate::abi::{Abi, FnAbi, FnAbiLlvmExt, LlvmType, PassMode}; +use crate::builder::Builder; +use crate::context::CodegenCx; +use crate::llvm; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::va_arg::emit_va_arg; +use crate::value::Value; fn get_simple_intrinsic<'ll>( cx: &CodegenCx<'ll, '_>, @@ -35,10 +36,10 @@ fn get_simple_intrinsic<'ll>( sym::sqrtf64 => "llvm.sqrt.f64", sym::sqrtf128 => "llvm.sqrt.f128", - sym::powif16 => "llvm.powi.f16", - sym::powif32 => "llvm.powi.f32", - sym::powif64 => "llvm.powi.f64", - sym::powif128 => "llvm.powi.f128", + sym::powif16 => "llvm.powi.f16.i32", + sym::powif32 => "llvm.powi.f32.i32", + sym::powif64 => "llvm.powi.f64.i32", + sym::powif128 => "llvm.powi.f128.i32", sym::sinf16 => "llvm.sin.f16", sym::sinf32 => "llvm.sin.f32", @@ -147,7 +148,7 @@ fn get_simple_intrinsic<'ll>( Some(cx.get_intrinsic(llvm_name)) } -impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { +impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn codegen_intrinsic_call( &mut self, instance: ty::Instance<'tcx>, @@ -186,23 +187,57 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { Some(instance), ) } - sym::likely => { - self.call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(true)]) - } + sym::likely => self.expect(args[0].immediate(), true), sym::is_val_statically_known => { let intrinsic_type = args[0].layout.immediate_llvm_type(self.cx); - match self.type_kind(intrinsic_type) { - TypeKind::Pointer | TypeKind::Integer | TypeKind::Float | TypeKind::Double => { - self.call_intrinsic( - &format!("llvm.is.constant.{:?}", intrinsic_type), - &[args[0].immediate()], - ) + let kind = self.type_kind(intrinsic_type); + let intrinsic_name = match kind { + TypeKind::Pointer | TypeKind::Integer => { + Some(format!("llvm.is.constant.{intrinsic_type:?}")) + } + // LLVM float types' intrinsic names differ from their type names. 
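As the preceding comment notes, LLVM prints the float types themselves as `half`, `float`, `double` and `fp128`, while overloaded intrinsic names (such as the `llvm.is.constant.*` arms that follow) are mangled with `f16`/`f32`/`f64`/`f128`. A small illustrative mapping, not the compiler's code:

fn float_intrinsic_suffix(llvm_type_name: &str) -> Option<&'static str> {
    match llvm_type_name {
        "half" => Some("f16"),
        "float" => Some("f32"),
        "double" => Some("f64"),
        "fp128" => Some("f128"),
        _ => None,
    }
}

fn main() {
    let name = float_intrinsic_suffix("double").map(|s| format!("llvm.is.constant.{s}"));
    assert_eq!(name.as_deref(), Some("llvm.is.constant.f64"));
}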
+ TypeKind::Half => Some(format!("llvm.is.constant.f16")), + TypeKind::Float => Some(format!("llvm.is.constant.f32")), + TypeKind::Double => Some(format!("llvm.is.constant.f64")), + TypeKind::FP128 => Some(format!("llvm.is.constant.f128")), + _ => None, + }; + if let Some(intrinsic_name) = intrinsic_name { + self.call_intrinsic(&intrinsic_name, &[args[0].immediate()]) + } else { + self.const_bool(false) + } + } + sym::unlikely => self.expect(args[0].immediate(), false), + sym::select_unpredictable => { + let cond = args[0].immediate(); + assert_eq!(args[1].layout, args[2].layout); + let select = |bx: &mut Self, true_val, false_val| { + let result = bx.select(cond, true_val, false_val); + bx.set_unpredictable(&result); + result + }; + match (args[1].val, args[2].val) { + (OperandValue::Ref(true_val), OperandValue::Ref(false_val)) => { + assert!(true_val.llextra.is_none()); + assert!(false_val.llextra.is_none()); + assert_eq!(true_val.align, false_val.align); + let ptr = select(self, true_val.llval, false_val.llval); + let selected = + OperandValue::Ref(PlaceValue::new_sized(ptr, true_val.align)); + selected.store(self, result); + return Ok(()); + } + (OperandValue::Immediate(_), OperandValue::Immediate(_)) + | (OperandValue::Pair(_, _), OperandValue::Pair(_, _)) => { + let true_val = args[1].immediate_or_packed_pair(self); + let false_val = args[2].immediate_or_packed_pair(self); + select(self, true_val, false_val) } - _ => self.const_bool(false), + (OperandValue::ZeroSized, OperandValue::ZeroSized) => return Ok(()), + _ => span_bug!(span, "Incompatible OperandValue for select_unpredictable"), } } - sym::unlikely => self - .call_intrinsic("llvm.expect.i1", &[args[0].immediate(), self.const_bool(false)]), sym::catch_unwind => { catch_unwind_intrinsic( self, @@ -369,7 +404,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { let llvm_name = &format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width); - // llvm expects shift to be the same type as the values, but rust always uses `u32` + // llvm expects shift to be the same type as the values, but rust + // always uses `u32`. let raw_shift = self.intcast(raw_shift, self.val_ty(val), false); self.call_intrinsic(llvm_name, &[val, val, raw_shift]) @@ -538,6 +574,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { span, ) { Ok(llval) => llval, + // If there was an error, just skip this invocation... we'll abort compilation + // anyway, but we can keep codegen'ing to find more errors. 
Err(()) => return Ok(()), } } @@ -566,11 +604,17 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { } fn assume(&mut self, val: Self::Value) { - self.call_intrinsic("llvm.assume", &[val]); + if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No { + self.call_intrinsic("llvm.assume", &[val]); + } } fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value { - self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)]) + if self.cx.sess().opts.optimize != rustc_session::config::OptLevel::No { + self.call_intrinsic("llvm.expect.i1", &[cond, self.const_bool(expected)]) + } else { + cond + } } fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value { @@ -1113,7 +1157,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if cfg!(debug_assertions) { for (ty, arg) in arg_tys.iter().zip(args) { if ty.is_simd() { - assert!(matches!(arg.val, OperandValue::Immediate(_))); + assert_matches!(arg.val, OperandValue::Immediate(_)); } } } @@ -1249,19 +1293,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } if name == sym::simd_shuffle { - // Make sure this is actually an array, since typeck only checks the length-suffixed - // version of this intrinsic. - let n: u64 = match args[2].layout.ty.kind() { - ty::Array(ty, len) if matches!(ty.kind(), ty::Uint(ty::UintTy::U32)) => { - len.try_eval_target_usize(bx.cx.tcx, ty::ParamEnv::reveal_all()).unwrap_or_else( - || span_bug!(span, "could not evaluate shuffle index array length"), - ) - } - _ => return_error!(InvalidMonomorphization::SimdShuffle { - span, - name, - ty: args[2].layout.ty - }), + // Make sure this is actually a SIMD vector. + let idx_ty = args[2].layout.ty; + let n: u64 = if idx_ty.is_simd() + && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32)) + { + idx_ty.simd_size_and_type(bx.cx.tcx).0 + } else { + return_error!(InvalidMonomorphization::SimdShuffle { span, name, ty: idx_ty }) }; let (out_len, out_ty) = require_simd!(ret_ty, SimdReturn); @@ -1276,38 +1315,24 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let total_len = u128::from(in_len) * 2; - let vector = args[2].immediate(); - - let indices: Option<Vec<_>> = (0..n) - .map(|i| { - let arg_idx = i; - let val = bx.const_get_elt(vector, i as u64); - match bx.const_to_opt_u128(val, true) { - None => { - bug!("typeck should have already ensured that these are const") - } - Some(idx) if idx >= total_len => { - bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds { - span, - name, - arg_idx, - total_len, - }); - None - } - Some(idx) => Some(bx.const_i32(idx as i32)), - } - }) - .collect(); - let Some(indices) = indices else { - return Ok(bx.const_null(llret_ty)); - }; + // Check that the indices are in-bounds. 
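A shuffle index selects a lane from the concatenation of both input vectors, which is why the in-bounds check that continues below compares against `total_len = 2 * in_len`. A scalar sketch of those semantics (illustrative, not the compiler's code):

fn shuffle<const N: usize>(a: [u32; N], b: [u32; N], indices: &[u32]) -> Vec<u32> {
    let total_len = 2 * N;
    indices
        .iter()
        .map(|&idx| {
            let idx = idx as usize;
            assert!(idx < total_len, "shuffle index out of bounds");
            // Indices 0..N pick from `a`, N..2*N pick from `b`.
            if idx < N { a[idx] } else { b[idx - N] }
        })
        .collect()
}

fn main() {
    assert_eq!(shuffle([1, 2], [3, 4], &[0, 3]), vec![1, 4]);
}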
+ let indices = args[2].immediate(); + for i in 0..n { + let val = bx.const_get_elt(indices, i as u64); + let idx = bx + .const_to_opt_u128(val, true) + .unwrap_or_else(|| bug!("typeck should have already ensured that these are const")); + if idx >= total_len { + return_error!(InvalidMonomorphization::SimdIndexOutOfBounds { + span, + name, + arg_idx: i, + total_len, + }); + } + } - return Ok(bx.shuffle_vector( - args[0].immediate(), - args[1].immediate(), - bx.const_vector(&indices), - )); + return Ok(bx.shuffle_vector(args[0].immediate(), args[1].immediate(), indices)); } if name == sym::simd_insert { @@ -1325,13 +1350,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>( .const_to_opt_u128(args[1].immediate(), false) .expect("typeck should have ensure that this is a const"); if idx >= in_len.into() { - bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds { + return_error!(InvalidMonomorphization::SimdIndexOutOfBounds { span, name, arg_idx: 1, total_len: in_len.into(), }); - return Ok(bx.const_null(llret_ty)); } return Ok(bx.insert_element( args[0].immediate(), @@ -1348,13 +1372,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>( .const_to_opt_u128(args[1].immediate(), false) .expect("typeck should have ensure that this is a const"); if idx >= in_len.into() { - bx.sess().dcx().emit_err(InvalidMonomorphization::SimdIndexOutOfBounds { + return_error!(InvalidMonomorphization::SimdIndexOutOfBounds { span, name, arg_idx: 1, total_len: in_len.into(), }); - return Ok(bx.const_null(llret_ty)); } return Ok(bx.extract_element(args[0].immediate(), bx.const_i32(idx as i32))); } @@ -1825,7 +1848,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>( require!( matches!( *pointer_ty.kind(), - ty::RawPtr(p_ty, p_mutbl) if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut() + ty::RawPtr(p_ty, p_mutbl) + if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut() ), InvalidMonomorphization::ExpectedElementType { span, @@ -2044,14 +2068,14 @@ fn generic_simd_intrinsic<'ll, 'tcx>( }; } - arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, 0.0); + arith_red!(simd_reduce_add_ordered: vector_reduce_add, vector_reduce_fadd, true, add, -0.0); arith_red!(simd_reduce_mul_ordered: vector_reduce_mul, vector_reduce_fmul, true, mul, 1.0); arith_red!( simd_reduce_add_unordered: vector_reduce_add, vector_reduce_fadd_reassoc, false, add, - 0.0 + -0.0 ); arith_red!( simd_reduce_mul_unordered: vector_reduce_mul, diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index a96993b9aba..7f26bbd7f87 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -8,6 +8,7 @@ #![allow(internal_features)] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] +#![feature(assert_matches)] #![feature(exact_size_is_empty)] #![feature(extern_types)] #![feature(hash_raw_entry)] @@ -15,11 +16,16 @@ #![feature(iter_intersperse)] #![feature(let_chains)] #![feature(rustdoc_internals)] +#![warn(unreachable_pub)] // tidy-alphabetical-end +use std::any::Any; +use std::ffi::CStr; +use std::io::Write; +use std::mem::ManuallyDrop; + use back::owned_target_machine::OwnedTargetMachine; use back::write::{create_informational_target_machine, create_target_machine}; - use errors::ParseTargetMachineConfig; pub use llvm_util::target_features; use rustc_ast::expand::allocator::AllocatorKind; @@ -28,8 +34,7 @@ use rustc_codegen_ssa::back::write::{ CodegenContext, 
FatLtoInput, ModuleConfig, TargetMachineFactoryConfig, TargetMachineFactoryFn, }; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::ModuleCodegen; -use rustc_codegen_ssa::{CodegenResults, CompiledModule}; +use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen}; use rustc_data_structures::fx::FxIndexMap; use rustc_errors::{DiagCtxtHandle, ErrorGuaranteed, FatalError}; use rustc_metadata::EncodedMetadata; @@ -40,17 +45,12 @@ use rustc_session::config::{OptLevel, OutputFilenames, PrintKind, PrintRequest}; use rustc_session::Session; use rustc_span::symbol::Symbol; -use std::any::Any; -use std::ffi::CStr; -use std::io::Write; -use std::mem::ManuallyDrop; - mod back { - pub mod archive; - pub mod lto; - pub mod owned_target_machine; + pub(crate) mod archive; + pub(crate) mod lto; + pub(crate) mod owned_target_machine; mod profiling; - pub mod write; + pub(crate) mod write; } mod abi; @@ -271,7 +271,7 @@ impl CodegenBackend for LlvmCodegenBackend { fn provide(&self, providers: &mut Providers) { providers.global_backend_features = - |tcx, ()| llvm_util::global_llvm_features(tcx.sess, true) + |tcx, ()| llvm_util::global_llvm_features(tcx.sess, true, false) } fn print(&self, req: &PrintRequest, out: &mut String, sess: &Session) { @@ -394,9 +394,10 @@ impl CodegenBackend for LlvmCodegenBackend { codegen_results: CodegenResults, outputs: &OutputFilenames, ) -> Result<(), ErrorGuaranteed> { - use crate::back::archive::LlvmArchiveBuilderBuilder; use rustc_codegen_ssa::back::link::link_binary; + use crate::back::archive::LlvmArchiveBuilderBuilder; + // Run the linker on any artifacts that resulted from the LLVM run. // This should produce either a finished executable or library. link_binary(sess, &LlvmArchiveBuilderBuilder, &codegen_results, outputs) @@ -435,7 +436,7 @@ impl ModuleLlvm { ModuleLlvm { llmod_raw, llcx, - tm: ManuallyDrop::new(create_informational_target_machine(tcx.sess)), + tm: ManuallyDrop::new(create_informational_target_machine(tcx.sess, false)), } } } diff --git a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs index 7d948970223..4dabde55e98 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/archive_ro.rs @@ -1,9 +1,9 @@ //! A wrapper around LLVM's archive (.a) code -use rustc_fs_util::path_to_c_string; use std::path::Path; -use std::slice; -use std::str; +use std::{slice, str}; + +use rustc_fs_util::path_to_c_string; pub struct ArchiveRO { pub raw: &'static mut super::Archive, diff --git a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs index 73e1b08a3d7..a4cb5a25d1b 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/diagnostic.rs @@ -1,13 +1,12 @@ //! LLVM diagnostic reports. 
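The new `#![warn(unreachable_pub)]` attribute above goes hand in hand with the many `pub` to `pub(crate)` changes in this diff: items that cannot actually be reached from outside the crate should say so. A minimal sketch of what the lint flags:

#![warn(unreachable_pub)]

mod inner {
    // Declaring this `pub` would trip `unreachable_pub`: it can never be
    // reached from outside the crate, because `mod inner` itself is private.
    //
    //     pub fn helper() {}
    //
    // Spelling out the real visibility keeps the lint quiet:
    pub(crate) fn helper() {}
}

fn main() {
    inner::helper();
}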
-pub use self::Diagnostic::*; -pub use self::OptimizationDiagnosticKind::*; - -use crate::value::Value; use libc::c_uint; +use rustc_span::InnerSpan; +pub use self::Diagnostic::*; +pub use self::OptimizationDiagnosticKind::*; use super::{DiagnosticInfo, SMDiagnostic}; -use rustc_span::InnerSpan; +use crate::value::Value; #[derive(Copy, Clone, Debug)] pub enum OptimizationDiagnosticKind { diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index ae46200d3f5..a588f11b623 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1,18 +1,16 @@ #![allow(non_camel_case_types)] #![allow(non_upper_case_globals)] +use std::marker::PhantomData; + +use libc::{c_char, c_int, c_uint, c_ulonglong, c_void, size_t}; + use super::debuginfo::{ DIArray, DIBasicType, DIBuilder, DICompositeType, DIDerivedType, DIDescriptor, DIEnumerator, DIFile, DIFlags, DIGlobalVariableExpression, DILexicalBlock, DILocation, DINameSpace, DISPFlags, DIScope, DISubprogram, DISubrange, DITemplateTypeParameter, DIType, DIVariable, DebugEmissionKind, DebugNameTableKind, }; - -use libc::{c_char, c_int, c_uint, size_t}; -use libc::{c_ulonglong, c_void}; - -use std::marker::PhantomData; - use super::RustString; pub type Bool = c_uint; @@ -222,17 +220,18 @@ pub enum IntPredicate { impl IntPredicate { pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self { + use rustc_codegen_ssa::common::IntPredicate as Common; match intpre { - rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ, - rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE, - rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT, - rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE, - rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT, - rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE, - rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT, - rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE, - rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT, - rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE, + Common::IntEQ => Self::IntEQ, + Common::IntNE => Self::IntNE, + Common::IntUGT => Self::IntUGT, + Common::IntUGE => Self::IntUGE, + Common::IntULT => Self::IntULT, + Common::IntULE => Self::IntULE, + Common::IntSGT => Self::IntSGT, + Common::IntSGE => Self::IntSGE, + Common::IntSLT => Self::IntSLT, + Common::IntSLE => Self::IntSLE, } } } @@ -261,27 +260,24 @@ pub enum RealPredicate { impl RealPredicate { pub fn from_generic(realp: rustc_codegen_ssa::common::RealPredicate) -> Self { + use rustc_codegen_ssa::common::RealPredicate as Common; match realp { - rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => { - RealPredicate::RealPredicateFalse - } - rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, - rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT, - rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE, - rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT, - rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE, - rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE, - rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD, - rustc_codegen_ssa::common::RealPredicate::RealUNO => 
RealPredicate::RealUNO, - rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, - rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT, - rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE, - rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT, - rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE, - rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE, - rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => { - RealPredicate::RealPredicateTrue - } + Common::RealPredicateFalse => Self::RealPredicateFalse, + Common::RealOEQ => Self::RealOEQ, + Common::RealOGT => Self::RealOGT, + Common::RealOGE => Self::RealOGE, + Common::RealOLT => Self::RealOLT, + Common::RealOLE => Self::RealOLE, + Common::RealONE => Self::RealONE, + Common::RealORD => Self::RealORD, + Common::RealUNO => Self::RealUNO, + Common::RealUEQ => Self::RealUEQ, + Common::RealUGT => Self::RealUGT, + Common::RealUGE => Self::RealUGE, + Common::RealULT => Self::RealULT, + Common::RealULE => Self::RealULE, + Common::RealUNE => Self::RealUNE, + Common::RealPredicateTrue => Self::RealPredicateTrue, } } } @@ -305,7 +301,6 @@ pub enum TypeKind { Pointer = 12, Vector = 13, Metadata = 14, - X86_MMX = 15, Token = 16, ScalableVector = 17, BFloat = 18, @@ -314,27 +309,27 @@ pub enum TypeKind { impl TypeKind { pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind { + use rustc_codegen_ssa::common::TypeKind as Common; match self { - TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void, - TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half, - TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float, - TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double, - TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80, - TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128, - TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128, - TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label, - TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer, - TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function, - TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct, - TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array, - TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer, - TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector, - TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata, - TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX, - TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token, - TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector, - TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat, - TypeKind::X86_AMX => rustc_codegen_ssa::common::TypeKind::X86_AMX, + Self::Void => Common::Void, + Self::Half => Common::Half, + Self::Float => Common::Float, + Self::Double => Common::Double, + Self::X86_FP80 => Common::X86_FP80, + Self::FP128 => Common::FP128, + Self::PPC_FP128 => Common::PPC_FP128, + Self::Label => Common::Label, + Self::Integer => Common::Integer, + Self::Function => Common::Function, + Self::Struct => Common::Struct, + Self::Array => Common::Array, + Self::Pointer => Common::Pointer, + Self::Vector => Common::Vector, + Self::Metadata => Common::Metadata, + Self::Token => Common::Token, + Self::ScalableVector => Common::ScalableVector, + Self::BFloat => 
Common::BFloat, + Self::X86_AMX => Common::X86_AMX, } } } @@ -358,18 +353,19 @@ pub enum AtomicRmwBinOp { impl AtomicRmwBinOp { pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self { + use rustc_codegen_ssa::common::AtomicRmwBinOp as Common; match op { - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, - rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin, + Common::AtomicXchg => Self::AtomicXchg, + Common::AtomicAdd => Self::AtomicAdd, + Common::AtomicSub => Self::AtomicSub, + Common::AtomicAnd => Self::AtomicAnd, + Common::AtomicNand => Self::AtomicNand, + Common::AtomicOr => Self::AtomicOr, + Common::AtomicXor => Self::AtomicXor, + Common::AtomicMax => Self::AtomicMax, + Common::AtomicMin => Self::AtomicMin, + Common::AtomicUMax => Self::AtomicUMax, + Common::AtomicUMin => Self::AtomicUMin, } } } @@ -391,17 +387,14 @@ pub enum AtomicOrdering { impl AtomicOrdering { pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self { + use rustc_codegen_ssa::common::AtomicOrdering as Common; match ao { - rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, - rustc_codegen_ssa::common::AtomicOrdering::Relaxed => AtomicOrdering::Monotonic, - rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, - rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release, - rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => { - AtomicOrdering::AcquireRelease - } - rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => { - AtomicOrdering::SequentiallyConsistent - } + Common::Unordered => Self::Unordered, + Common::Relaxed => Self::Monotonic, + Common::Acquire => Self::Acquire, + Common::Release => Self::Release, + Common::AcquireRelease => Self::AcquireRelease, + Common::SequentiallyConsistent => Self::SequentiallyConsistent, } } } @@ -430,6 +423,7 @@ pub enum MetadataType { MD_nontemporal = 9, MD_mem_parallel_loop_access = 10, MD_nonnull = 11, + MD_unpredictable = 15, MD_align = 17, MD_type = 19, MD_vcall_visibility = 28, @@ -566,13 +560,11 @@ pub enum ArchiveKind { K_AIXBIG, } -// LLVMRustThinLTOData -extern "C" { +unsafe extern "C" { + // LLVMRustThinLTOData pub type ThinLTOData; -} -// LLVMRustThinLTOBuffer -extern "C" { + // LLVMRustThinLTOBuffer pub type ThinLTOBuffer; } @@ -624,7 +616,7 @@ pub enum MemoryEffects { InaccessibleMemOnly, } -extern "C" { +unsafe extern "C" { type Opaque; } #[repr(C)] @@ -634,54 +626,32 @@ struct InvariantOpaque<'a> { } // Opaque pointer types -extern "C" { +unsafe extern "C" { pub type Module; -} -extern "C" { pub type Context; -} -extern "C" { pub type Type; -} -extern "C" { pub type Value; -} -extern "C" { pub type 
ConstantInt; -} -extern "C" { pub type Attribute; -} -extern "C" { pub type Metadata; -} -extern "C" { pub type BasicBlock; } #[repr(C)] pub struct Builder<'a>(InvariantOpaque<'a>); #[repr(C)] pub struct PassManager<'a>(InvariantOpaque<'a>); -extern "C" { +unsafe extern "C" { pub type Pass; -} -extern "C" { pub type TargetMachine; -} -extern "C" { pub type Archive; } #[repr(C)] pub struct ArchiveIterator<'a>(InvariantOpaque<'a>); #[repr(C)] pub struct ArchiveChild<'a>(InvariantOpaque<'a>); -extern "C" { +unsafe extern "C" { pub type Twine; -} -extern "C" { pub type DiagnosticInfo; -} -extern "C" { pub type SMDiagnostic; } #[repr(C)] @@ -691,7 +661,7 @@ pub struct OperandBundleDef<'a>(InvariantOpaque<'a>); #[repr(C)] pub struct Linker<'a>(InvariantOpaque<'a>); -extern "C" { +unsafe extern "C" { pub type DiagnosticHandler; } @@ -699,9 +669,10 @@ pub type DiagnosticHandlerTy = unsafe extern "C" fn(&DiagnosticInfo, *mut c_void pub type InlineAsmDiagHandlerTy = unsafe extern "C" fn(&SMDiagnostic, *const c_void, c_uint); pub mod debuginfo { - use super::{InvariantOpaque, Metadata}; use bitflags::bitflags; + use super::{InvariantOpaque, Metadata}; + #[repr(C)] pub struct DIBuilder<'a>(InvariantOpaque<'a>); @@ -825,7 +796,7 @@ bitflags! { } } -extern "C" { +unsafe extern "C" { pub type ModuleBuffer; } @@ -836,7 +807,7 @@ pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void); pub type GetSymbolsCallback = unsafe extern "C" fn(*mut c_void, *const c_char) -> *mut c_void; pub type GetSymbolsErrorCallback = unsafe extern "C" fn(*const c_char) -> *mut c_void; -extern "C" { +unsafe extern "C" { // Create and destroy contexts. pub fn LLVMContextDispose(C: &'static mut Context); pub fn LLVMGetMDKindIDInContext(C: &Context, Name: *const c_char, SLen: c_uint) -> c_uint; @@ -976,6 +947,7 @@ extern "C" { pub fn LLVMGetAlignment(Global: &Value) -> c_uint; pub fn LLVMSetAlignment(Global: &Value, Bytes: c_uint); pub fn LLVMSetDLLStorageClass(V: &Value, C: DLLStorageClass); + pub fn LLVMGlobalGetValueType(Global: &Value) -> &Type; // Operations on global variables pub fn LLVMIsAGlobalVariable(GlobalVar: &Value) -> Option<&Value>; @@ -1042,7 +1014,7 @@ extern "C" { pub fn LLVMDisposeBuilder<'a>(Builder: &'a mut Builder<'a>); // Metadata - pub fn LLVMSetCurrentDebugLocation2<'a>(Builder: &Builder<'a>, Loc: &'a Metadata); + pub fn LLVMSetCurrentDebugLocation2<'a>(Builder: &Builder<'a>, Loc: *const Metadata); // Terminators pub fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value; @@ -1520,7 +1492,7 @@ extern "C" { } #[link(name = "llvm-wrapper", kind = "static")] -extern "C" { +unsafe extern "C" { pub fn LLVMRustInstallErrorHandlers(); pub fn LLVMRustDisableSystemDialogsOnCrash(); @@ -1577,6 +1549,12 @@ extern "C" { pub fn LLVMRustCreateAllocSizeAttr(C: &Context, size_arg: u32) -> &Attribute; pub fn LLVMRustCreateAllocKindAttr(C: &Context, size_arg: u64) -> &Attribute; pub fn LLVMRustCreateMemoryEffectsAttr(C: &Context, effects: MemoryEffects) -> &Attribute; + pub fn LLVMRustCreateRangeAttribute( + C: &Context, + num_bits: c_uint, + lower_words: *const u64, + upper_words: *const u64, + ) -> &Attribute; // Operations on functions pub fn LLVMRustGetOrInsertFunction<'a>( @@ -1856,6 +1834,8 @@ extern "C" { CSKind: ChecksumKind, Checksum: *const c_char, ChecksumLen: size_t, + Source: *const c_char, + SourceLen: size_t, ) -> &'a DIFile; pub fn LLVMRustDIBuilderCreateSubroutineType<'a>( @@ -2170,7 +2150,8 @@ extern "C" { pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char; - // This function 
makes copies of pointed to data, so the data's lifetime may end after this function returns + // This function makes copies of pointed to data, so the data's lifetime may end after this + // function returns. pub fn LLVMRustCreateTargetMachine( Triple: *const c_char, CPU: *const c_char, @@ -2219,6 +2200,7 @@ extern "C" { IsLinkerPluginLTO: bool, NoPrepopulatePasses: bool, VerifyIR: bool, + LintIR: bool, UseThinLTOBuffers: bool, MergeFunctions: bool, UnrollLoops: bool, diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 6ab1eea9597..d0db350a149 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -1,5 +1,15 @@ #![allow(non_snake_case)] +use std::cell::RefCell; +use std::ffi::{CStr, CString}; +use std::str::FromStr; +use std::string::FromUtf8Error; + +use libc::c_uint; +use rustc_data_structures::small_c_str::SmallCStr; +use rustc_llvm::RustString; +use rustc_target::abi::{Align, Size, WrappingRange}; + pub use self::AtomicRmwBinOp::*; pub use self::CallConv::*; pub use self::CodeGenOptSize::*; @@ -8,15 +18,6 @@ pub use self::Linkage::*; pub use self::MetadataType::*; pub use self::RealPredicate::*; -use libc::c_uint; -use rustc_data_structures::small_c_str::SmallCStr; -use rustc_llvm::RustString; -use rustc_target::abi::Align; -use std::cell::RefCell; -use std::ffi::{CStr, CString}; -use std::str::FromStr; -use std::string::FromUtf8Error; - pub mod archive_ro; pub mod diagnostic; mod ffi; @@ -104,6 +105,21 @@ pub fn CreateAllocKindAttr(llcx: &Context, kind_arg: AllocKindFlags) -> &Attribu unsafe { LLVMRustCreateAllocKindAttr(llcx, kind_arg.bits()) } } +pub fn CreateRangeAttr(llcx: &Context, size: Size, range: WrappingRange) -> &Attribute { + let lower = range.start; + let upper = range.end.wrapping_add(1); + let lower_words = [lower as u64, (lower >> 64) as u64]; + let upper_words = [upper as u64, (upper >> 64) as u64]; + unsafe { + LLVMRustCreateRangeAttribute( + llcx, + size.bits().try_into().unwrap(), + lower_words.as_ptr(), + upper_words.as_ptr(), + ) + } +} + #[derive(Copy, Clone)] pub enum AttributePlace { ReturnValue, diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 4d56d1d3b1a..fd8db4ad1d5 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -1,28 +1,28 @@ -use crate::back::write::create_informational_target_machine; -use crate::errors::{ - FixedX18InvalidArch, InvalidTargetFeaturePrefix, PossibleFeature, TargetFeatureDisableOrEnable, - UnknownCTargetFeature, UnknownCTargetFeaturePrefix, UnstableCTargetFeature, -}; -use crate::llvm; +use std::ffi::{c_char, c_void, CStr, CString}; +use std::fmt::Write; +use std::path::Path; +use std::sync::Once; +use std::{ptr, slice, str}; + use libc::c_int; use rustc_codegen_ssa::base::wants_wasm_eh; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_data_structures::unord::UnordSet; use rustc_fs_util::path_to_c_string; use rustc_middle::bug; use rustc_session::config::{PrintKind, PrintRequest}; use rustc_session::Session; use rustc_span::symbol::Symbol; -use rustc_target::spec::{MergeFunctions, PanicStrategy}; +use rustc_target::spec::{MergeFunctions, PanicStrategy, SmallDataThresholdSupport}; use rustc_target::target_features::{RUSTC_SPECIAL_FEATURES, RUSTC_SPECIFIC_FEATURES}; -use std::ffi::{c_char, c_void, CStr, CString}; -use std::fmt::Write; -use 
std::path::Path; -use std::ptr; -use std::slice; -use std::str; -use std::sync::Once; +use crate::back::write::create_informational_target_machine; +use crate::errors::{ + FixedX18InvalidArch, InvalidTargetFeaturePrefix, PossibleFeature, TargetFeatureDisableOrEnable, + UnknownCTargetFeature, UnknownCTargetFeaturePrefix, UnstableCTargetFeature, +}; +use crate::llvm; static INIT: Once = Once::new(); @@ -125,6 +125,18 @@ unsafe fn configure_llvm(sess: &Session) { for arg in sess_args { add(&(*arg), true); } + + match ( + sess.opts.unstable_opts.small_data_threshold, + sess.target.small_data_threshold_support(), + ) { + // Set up the small-data optimization limit for architectures that use + // an LLVM argument to control this. + (Some(threshold), SmallDataThresholdSupport::LlvmArg(arg)) => { + add(&format!("--{arg}={threshold}"), false) + } + _ => (), + }; } if sess.opts.unstable_opts.llvm_time_trace { @@ -136,14 +148,14 @@ unsafe fn configure_llvm(sess: &Session) { unsafe { llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr()) }; } -pub fn time_trace_profiler_finish(file_name: &Path) { +pub(crate) fn time_trace_profiler_finish(file_name: &Path) { unsafe { let file_name = path_to_c_string(file_name); llvm::LLVMRustTimeTraceProfilerFinish(file_name.as_ptr()); } } -pub enum TargetFeatureFoldStrength<'a> { +enum TargetFeatureFoldStrength<'a> { // The feature is only tied when enabling the feature, disabling // this feature shouldn't disable the tied feature. EnableOnly(&'a str), @@ -160,28 +172,28 @@ impl<'a> TargetFeatureFoldStrength<'a> { } } -pub struct LLVMFeature<'a> { - pub llvm_feature_name: &'a str, - pub dependency: Option<TargetFeatureFoldStrength<'a>>, +pub(crate) struct LLVMFeature<'a> { + llvm_feature_name: &'a str, + dependency: Option<TargetFeatureFoldStrength<'a>>, } impl<'a> LLVMFeature<'a> { - pub fn new(llvm_feature_name: &'a str) -> Self { + fn new(llvm_feature_name: &'a str) -> Self { Self { llvm_feature_name, dependency: None } } - pub fn with_dependency( + fn with_dependency( llvm_feature_name: &'a str, dependency: TargetFeatureFoldStrength<'a>, ) -> Self { Self { llvm_feature_name, dependency: Some(dependency) } } - pub fn contains(&self, feat: &str) -> bool { + fn contains(&self, feat: &str) -> bool { self.iter().any(|dep| dep == feat) } - pub fn iter(&'a self) -> impl Iterator<Item = &'a str> { + fn iter(&'a self) -> impl Iterator<Item = &'a str> { let dependencies = self.dependency.iter().map(|feat| feat.as_str()); std::iter::once(self.llvm_feature_name).chain(dependencies) } @@ -205,11 +217,11 @@ impl<'a> IntoIterator for LLVMFeature<'a> { // where `{ARCH}` is the architecture name. Look for instances of `SubtargetFeature`. // // Check the current rustc fork of LLVM in the repo at https://github.com/rust-lang/llvm-project/. 
-// The commit in use can be found via the `llvm-project` submodule in https://github.com/rust-lang/rust/tree/master/src -// Though note that Rust can also be build with an external precompiled version of LLVM -// which might lead to failures if the oldest tested / supported LLVM version -// doesn't yet support the relevant intrinsics -pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> { +// The commit in use can be found via the `llvm-project` submodule in +// https://github.com/rust-lang/rust/tree/master/src Though note that Rust can also be build with +// an external precompiled version of LLVM which might lead to failures if the oldest tested / +// supported LLVM version doesn't yet support the relevant intrinsics. +pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFeature<'a>> { let arch = if sess.target.arch == "x86_64" { "x86" } else if sess.target.arch == "arm64ec" { @@ -218,78 +230,51 @@ pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> { &*sess.target.arch }; match (arch, s) { - ("x86", "sse4.2") => { - LLVMFeature::with_dependency("sse4.2", TargetFeatureFoldStrength::EnableOnly("crc32")) - } - ("x86", "pclmulqdq") => LLVMFeature::new("pclmul"), - ("x86", "rdrand") => LLVMFeature::new("rdrnd"), - ("x86", "bmi1") => LLVMFeature::new("bmi"), - ("x86", "cmpxchg16b") => LLVMFeature::new("cx16"), - ("x86", "lahfsahf") => LLVMFeature::new("sahf"), - ("aarch64", "rcpc2") => LLVMFeature::new("rcpc-immo"), - ("aarch64", "dpb") => LLVMFeature::new("ccpp"), - ("aarch64", "dpb2") => LLVMFeature::new("ccdp"), - ("aarch64", "frintts") => LLVMFeature::new("fptoint"), - ("aarch64", "fcma") => LLVMFeature::new("complxnum"), - ("aarch64", "pmuv3") => LLVMFeature::new("perfmon"), - ("aarch64", "paca") => LLVMFeature::new("pauth"), - ("aarch64", "pacg") => LLVMFeature::new("pauth"), + ("x86", "sse4.2") => Some(LLVMFeature::with_dependency( + "sse4.2", + TargetFeatureFoldStrength::EnableOnly("crc32"), + )), + ("x86", "pclmulqdq") => Some(LLVMFeature::new("pclmul")), + ("x86", "rdrand") => Some(LLVMFeature::new("rdrnd")), + ("x86", "bmi1") => Some(LLVMFeature::new("bmi")), + ("x86", "cmpxchg16b") => Some(LLVMFeature::new("cx16")), + ("x86", "lahfsahf") => Some(LLVMFeature::new("sahf")), + ("aarch64", "rcpc2") => Some(LLVMFeature::new("rcpc-immo")), + ("aarch64", "dpb") => Some(LLVMFeature::new("ccpp")), + ("aarch64", "dpb2") => Some(LLVMFeature::new("ccdp")), + ("aarch64", "frintts") => Some(LLVMFeature::new("fptoint")), + ("aarch64", "fcma") => Some(LLVMFeature::new("complxnum")), + ("aarch64", "pmuv3") => Some(LLVMFeature::new("perfmon")), + ("aarch64", "paca") => Some(LLVMFeature::new("pauth")), + ("aarch64", "pacg") => Some(LLVMFeature::new("pauth")), + ("aarch64", "sve-b16b16") => Some(LLVMFeature::new("b16b16")), + ("aarch64", "flagm2") => Some(LLVMFeature::new("altnzcv")), // Rust ties fp and neon together. 
("aarch64", "neon") => { - LLVMFeature::with_dependency("neon", TargetFeatureFoldStrength::Both("fp-armv8")) + Some(LLVMFeature::with_dependency("neon", TargetFeatureFoldStrength::Both("fp-armv8"))) } // In LLVM neon implicitly enables fp, but we manually enable // neon when a feature only implicitly enables fp - ("aarch64", "f32mm") => { - LLVMFeature::with_dependency("f32mm", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "f64mm") => { - LLVMFeature::with_dependency("f64mm", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "fhm") => { - LLVMFeature::with_dependency("fp16fml", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "fp16") => { - LLVMFeature::with_dependency("fullfp16", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "jsconv") => { - LLVMFeature::with_dependency("jsconv", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "sve") => { - LLVMFeature::with_dependency("sve", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "sve2") => { - LLVMFeature::with_dependency("sve2", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "sve2-aes") => { - LLVMFeature::with_dependency("sve2-aes", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "sve2-sm4") => { - LLVMFeature::with_dependency("sve2-sm4", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "sve2-sha3") => { - LLVMFeature::with_dependency("sve2-sha3", TargetFeatureFoldStrength::EnableOnly("neon")) - } - ("aarch64", "sve2-bitperm") => LLVMFeature::with_dependency( - "sve2-bitperm", - TargetFeatureFoldStrength::EnableOnly("neon"), - ), - // In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single feature called - // `fast-unaligned-access`. In LLVM 19, it was split back out. + ("aarch64", "fhm") => Some(LLVMFeature::new("fp16fml")), + ("aarch64", "fp16") => Some(LLVMFeature::new("fullfp16")), + // Filter out features that are not supported by the current LLVM version + ("aarch64", "fpmr") if get_version().0 != 18 => None, + // In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single + // feature called `fast-unaligned-access`. In LLVM 19, it was split back out. ("riscv32" | "riscv64", "unaligned-scalar-mem") if get_version().0 == 18 => { - LLVMFeature::new("fast-unaligned-access") + Some(LLVMFeature::new("fast-unaligned-access")) } - // For LLVM 18, enable the evex512 target feature if a avx512 target feature is enabled. - ("x86", s) if get_version().0 >= 18 && s.starts_with("avx512") => { - LLVMFeature::with_dependency(s, TargetFeatureFoldStrength::EnableOnly("evex512")) + // Enable the evex512 target feature if an avx512 target feature is enabled. + ("x86", s) if s.starts_with("avx512") => { + Some(LLVMFeature::with_dependency(s, TargetFeatureFoldStrength::EnableOnly("evex512"))) } - (_, s) => LLVMFeature::new(s), + (_, s) => Some(LLVMFeature::new(s)), } } /// Given a map from target_features to whether they are enabled or disabled, /// ensure only valid combinations are allowed. 
-pub fn check_tied_features( +pub(crate) fn check_tied_features( sess: &Session, features: &FxHashMap<&str, bool>, ) -> Option<&'static [&'static str]> { @@ -303,54 +288,87 @@ pub fn check_tied_features( } } } - return None; + None } /// Used to generate cfg variables and apply features /// Must express features in the way Rust understands them pub fn target_features(sess: &Session, allow_unstable: bool) -> Vec<Symbol> { - let target_machine = create_informational_target_machine(sess); + let mut features = vec![]; + + // Add base features for the target + let target_machine = create_informational_target_machine(sess, true); + features.extend( + sess.target + .supported_target_features() + .iter() + .filter(|(feature, _, _)| { + // skip checking special features, as LLVM may not understands them + if RUSTC_SPECIAL_FEATURES.contains(feature) { + return true; + } + // check that all features in a given smallvec are enabled + if let Some(feat) = to_llvm_features(sess, feature) { + for llvm_feature in feat { + let cstr = SmallCStr::new(llvm_feature); + if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } { + return false; + } + } + true + } else { + false + } + }) + .map(|(feature, _, _)| Symbol::intern(feature)), + ); + + // Add enabled features + for (enabled, feature) in + sess.opts.cg.target_feature.split(',').filter_map(|s| match s.chars().next() { + Some('+') => Some((true, Symbol::intern(&s[1..]))), + Some('-') => Some((false, Symbol::intern(&s[1..]))), + _ => None, + }) + { + if enabled { + features.extend(sess.target.implied_target_features(std::iter::once(feature))); + } else { + features.retain(|f| { + !sess.target.implied_target_features(std::iter::once(*f)).contains(&feature) + }); + } + } + + // Filter enabled features based on feature gates sess.target .supported_target_features() .iter() - .filter_map(|&(feature, gate)| { + .filter_map(|&(feature, gate, _)| { if sess.is_nightly_build() || allow_unstable || gate.is_stable() { Some(feature) } else { None } }) - .filter(|feature| { - // skip checking special features, as LLVM may not understands them - if RUSTC_SPECIAL_FEATURES.contains(feature) { - return true; - } - // check that all features in a given smallvec are enabled - for llvm_feature in to_llvm_features(sess, feature) { - let cstr = SmallCStr::new(llvm_feature); - if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } { - return false; - } - } - true - }) + .filter(|feature| features.contains(&Symbol::intern(feature))) .map(|feature| Symbol::intern(feature)) .collect() } -pub fn print_version() { +pub(crate) fn print_version() { let (major, minor, patch) = get_version(); println!("LLVM version: {major}.{minor}.{patch}"); } -pub fn get_version() -> (u32, u32, u32) { +pub(crate) fn get_version() -> (u32, u32, u32) { // Can be called without initializing LLVM unsafe { (llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor(), llvm::LLVMRustVersionPatch()) } } -pub fn print_passes() { +pub(crate) fn print_passes() { // Can be called without initializing LLVM unsafe { llvm::LLVMRustPrintPasses(); @@ -387,9 +405,10 @@ fn print_target_features(out: &mut String, sess: &Session, tm: &llvm::TargetMach .target .supported_target_features() .iter() - .map(|(feature, _gate)| { - // LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings. - let llvm_feature = to_llvm_features(sess, *feature).llvm_feature_name; + .filter_map(|(feature, _gate, _implied)| { + // LLVM asserts that these are sorted. 
LLVM and Rust both use byte comparison for these + // strings. + let llvm_feature = to_llvm_features(sess, *feature)?.llvm_feature_name; let desc = match llvm_target_features.binary_search_by_key(&llvm_feature, |(f, _d)| f).ok() { Some(index) => { @@ -399,7 +418,7 @@ fn print_target_features(out: &mut String, sess: &Session, tm: &llvm::TargetMach None => "", }; - (*feature, desc) + Some((*feature, desc)) }) .collect::<Vec<_>>(); @@ -441,7 +460,7 @@ fn print_target_features(out: &mut String, sess: &Session, tm: &llvm::TargetMach pub(crate) fn print(req: &PrintRequest, mut out: &mut String, sess: &Session) { require_inited(); - let tm = create_informational_target_machine(sess); + let tm = create_informational_target_machine(sess, false); match req.kind { PrintKind::TargetCPUs => { // SAFETY generate a C compatible string from a byte slice to pass @@ -480,7 +499,7 @@ fn handle_native(name: &str) -> &str { } } -pub fn target_cpu(sess: &Session) -> &str { +pub(crate) fn target_cpu(sess: &Session) -> &str { match sess.opts.cg.target_cpu { Some(ref name) => handle_native(name), None => handle_native(sess.target.cpu.as_ref()), @@ -489,7 +508,11 @@ pub fn target_cpu(sess: &Session) -> &str { /// The list of LLVM features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`, /// `--target` and similar). -pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<String> { +pub(crate) fn global_llvm_features( + sess: &Session, + diagnostics: bool, + only_base_features: bool, +) -> Vec<String> { // Features that come earlier are overridden by conflicting features later in the string. // Typically we'll want more explicit settings to override the implicit ones, so: // @@ -549,94 +572,117 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str } // -Ctarget-features - let supported_features = sess.target.supported_target_features(); - let (llvm_major, _, _) = get_version(); - let mut featsmap = FxHashMap::default(); - let feats = sess - .opts - .cg - .target_feature - .split(',') - .filter_map(|s| { - let enable_disable = match s.chars().next() { - None => return None, - Some(c @ ('+' | '-')) => c, - Some(_) => { - if diagnostics { - sess.dcx().emit_warn(UnknownCTargetFeaturePrefix { feature: s }); + if !only_base_features { + let supported_features = sess.target.supported_target_features(); + let mut featsmap = FxHashMap::default(); + + // insert implied features + let mut all_rust_features = vec![]; + for feature in sess.opts.cg.target_feature.split(',') { + match feature.strip_prefix('+') { + Some(feature) => all_rust_features.extend( + UnordSet::from( + sess.target + .implied_target_features(std::iter::once(Symbol::intern(feature))), + ) + .to_sorted_stable_ord() + .iter() + .map(|s| format!("+{}", s.as_str())), + ), + _ => all_rust_features.push(feature.to_string()), + } + } + + let feats = all_rust_features + .iter() + .filter_map(|s| { + let enable_disable = match s.chars().next() { + None => return None, + Some(c @ ('+' | '-')) => c, + Some(_) => { + if diagnostics { + sess.dcx().emit_warn(UnknownCTargetFeaturePrefix { feature: s }); + } + return None; } - return None; - } - }; + }; - let feature = backend_feature_name(sess, s)?; - // Warn against use of LLVM specific feature names and unstable features on the CLI. 
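(Aside: the new base-feature pass in `target_features` and the `-Ctarget-feature` handling in this function both start from the same `+`/`-` prefix parse of a comma-separated list, matching on the first character of each entry. A minimal standalone sketch of that parse; the function name and the driver below are illustrative only, not the compiler's actual API:)

// Minimal sketch: split a `-Ctarget-feature` style string into
// (enabled, feature-name) pairs, skipping malformed entries.
fn parse_target_features(s: &str) -> Vec<(bool, &str)> {
    s.split(',')
        .filter_map(|item| match item.chars().next() {
            Some('+') => Some((true, &item[1..])),
            Some('-') => Some((false, &item[1..])),
            // Entries without a `+`/`-` prefix (or empty entries) are dropped here;
            // the real code additionally emits a warning for unknown prefixes.
            _ => None,
        })
        .collect()
}

fn main() {
    let parsed = parse_target_features("+sse4.2,-avx512f,+neon");
    assert_eq!(parsed, vec![(true, "sse4.2"), (false, "avx512f"), (true, "neon")]);
}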
- if diagnostics { - let feature_state = supported_features.iter().find(|&&(v, _)| v == feature); - if feature_state.is_none() { - let rust_feature = supported_features.iter().find_map(|&(rust_feature, _)| { - let llvm_features = to_llvm_features(sess, rust_feature); - if llvm_features.contains(feature) && !llvm_features.contains(rust_feature) - { - Some(rust_feature) + let feature = backend_feature_name(sess, s)?; + // Warn against use of LLVM specific feature names and unstable features on the CLI. + if diagnostics { + let feature_state = supported_features.iter().find(|&&(v, _, _)| v == feature); + if feature_state.is_none() { + let rust_feature = + supported_features.iter().find_map(|&(rust_feature, _, _)| { + let llvm_features = to_llvm_features(sess, rust_feature)?; + if llvm_features.contains(feature) + && !llvm_features.contains(rust_feature) + { + Some(rust_feature) + } else { + None + } + }); + let unknown_feature = if let Some(rust_feature) = rust_feature { + UnknownCTargetFeature { + feature, + rust_feature: PossibleFeature::Some { rust_feature }, + } } else { - None - } - }); - let unknown_feature = if let Some(rust_feature) = rust_feature { - UnknownCTargetFeature { - feature, - rust_feature: PossibleFeature::Some { rust_feature }, - } - } else { - UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None } - }; - sess.dcx().emit_warn(unknown_feature); - } else if feature_state - .is_some_and(|(_name, feature_gate)| !feature_gate.is_stable()) - { - // An unstable feature. Warn about using it. - sess.dcx().emit_warn(UnstableCTargetFeature { feature }); + UnknownCTargetFeature { feature, rust_feature: PossibleFeature::None } + }; + sess.dcx().emit_warn(unknown_feature); + } else if feature_state + .is_some_and(|(_name, feature_gate, _implied)| !feature_gate.is_stable()) + { + // An unstable feature. Warn about using it. + sess.dcx().emit_warn(UnstableCTargetFeature { feature }); + } } - } - if diagnostics { - // FIXME(nagisa): figure out how to not allocate a full hashset here. - featsmap.insert(feature, enable_disable == '+'); - } + if diagnostics { + // FIXME(nagisa): figure out how to not allocate a full hashset here. + featsmap.insert(feature, enable_disable == '+'); + } - // rustc-specific features do not get passed down to LLVM… - if RUSTC_SPECIFIC_FEATURES.contains(&feature) { - return None; - } + // rustc-specific features do not get passed down to LLVM… + if RUSTC_SPECIFIC_FEATURES.contains(&feature) { + return None; + } - // if the target-feature is "backchain" and LLVM version is greater than 18 - // then we also need to add "+backchain" to the target-features attribute. - // otherwise, we will only add the naked `backchain` attribute to the attribute-group. - if feature == "backchain" && llvm_major < 18 { - return None; - } - // ... otherwise though we run through `to_llvm_features` when - // passing requests down to LLVM. This means that all in-language - // features also work on the command line instead of having two - // different names when the LLVM name and the Rust name differ. - let llvm_feature = to_llvm_features(sess, feature); - - Some( - std::iter::once(format!("{}{}", enable_disable, llvm_feature.llvm_feature_name)) - .chain(llvm_feature.dependency.into_iter().filter_map(move |feat| { - match (enable_disable, feat) { + // ... otherwise though we run through `to_llvm_features` when + // passing requests down to LLVM. 
This means that all in-language + // features also work on the command line instead of having two + // different names when the LLVM name and the Rust name differ. + let llvm_feature = to_llvm_features(sess, feature)?; + + Some( + std::iter::once(format!( + "{}{}", + enable_disable, llvm_feature.llvm_feature_name + )) + .chain(llvm_feature.dependency.into_iter().filter_map( + move |feat| match (enable_disable, feat) { ('-' | '+', TargetFeatureFoldStrength::Both(f)) | ('+', TargetFeatureFoldStrength::EnableOnly(f)) => { Some(format!("{enable_disable}{f}")) } _ => None, - } - })), - ) - }) - .flatten(); - features.extend(feats); + }, + )), + ) + }) + .flatten(); + features.extend(feats); + + if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) { + sess.dcx().emit_err(TargetFeatureDisableOrEnable { + features: f, + span: None, + missing_features: None, + }); + } + } // -Zfixed-x18 if sess.opts.unstable_opts.fixed_x18 { @@ -647,14 +693,6 @@ pub(crate) fn global_llvm_features(sess: &Session, diagnostics: bool) -> Vec<Str } } - if diagnostics && let Some(f) = check_tied_features(sess, &featsmap) { - sess.dcx().emit_err(TargetFeatureDisableOrEnable { - features: f, - span: None, - missing_features: None, - }); - } - features } @@ -666,6 +704,9 @@ fn backend_feature_name<'a>(sess: &Session, s: &'a str) -> Option<&'a str> { let feature = s .strip_prefix(&['+', '-'][..]) .unwrap_or_else(|| sess.dcx().emit_fatal(InvalidTargetFeaturePrefix { feature: s })); + if s.is_empty() { + return None; + } // Rustc-specific feature requests like `+crt-static` or `-crt-static` // are not passed down to LLVM. if RUSTC_SPECIFIC_FEATURES.contains(&feature) { @@ -674,7 +715,7 @@ fn backend_feature_name<'a>(sess: &Session, s: &'a str) -> Option<&'a str> { Some(feature) } -pub fn tune_cpu(sess: &Session) -> Option<&str> { +pub(crate) fn tune_cpu(sess: &Session) -> Option<&str> { let name = sess.opts.unstable_opts.tune_cpu.as_ref()?; Some(handle_native(name)) } diff --git a/compiler/rustc_codegen_llvm/src/mono_item.rs b/compiler/rustc_codegen_llvm/src/mono_item.rs index 282a186be99..02e1995620b 100644 --- a/compiler/rustc_codegen_llvm/src/mono_item.rs +++ b/compiler/rustc_codegen_llvm/src/mono_item.rs @@ -1,9 +1,3 @@ -use crate::attributes; -use crate::base; -use crate::context::CodegenCx; -use crate::errors::SymbolAlreadyDefined; -use crate::llvm; -use crate::type_of::LayoutLlvmExt; use rustc_codegen_ssa::traits::*; use rustc_hir::def::DefKind; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; @@ -15,7 +9,12 @@ use rustc_session::config::CrateType; use rustc_target::spec::RelocModel; use tracing::debug; -impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> { +use crate::context::CodegenCx; +use crate::errors::SymbolAlreadyDefined; +use crate::type_of::LayoutLlvmExt; +use crate::{base, llvm}; + +impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { fn predefine_static( &self, def_id: DefId, @@ -25,8 +24,8 @@ impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> { ) { let instance = Instance::mono(self.tcx, def_id); let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() }; - // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out - // the llvm type from the actual evaluated initializer. + // Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure + // out the llvm type from the actual evaluated initializer. 
let ty = if nested { self.tcx.types.unit } else { @@ -88,8 +87,6 @@ impl<'tcx> PreDefineMethods<'tcx> for CodegenCx<'_, 'tcx> { debug!("predefine_fn: instance = {:?}", instance); - attributes::from_fn_attrs(self, lldecl, instance); - unsafe { if self.should_assume_dso_local(lldecl, false) { llvm::LLVMRustSetDSOLocal(lldecl, true); diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index f1141c57ced..2c2b9030b7c 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -1,12 +1,6 @@ -pub use crate::llvm::Type; +use std::{fmt, ptr}; -use crate::abi::{FnAbiLlvmExt, LlvmType}; -use crate::common; -use crate::context::CodegenCx; -use crate::llvm; -use crate::llvm::{Bool, False, True}; -use crate::type_of::LayoutLlvmExt; -use crate::value::Value; +use libc::{c_char, c_uint}; use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::traits::*; use rustc_data_structures::small_c_str::SmallCStr; @@ -16,10 +10,13 @@ use rustc_middle::ty::{self, Ty}; use rustc_target::abi::call::{CastTarget, FnAbi, Reg}; use rustc_target::abi::{AddressSpace, Align, Integer, Size}; -use std::fmt; -use std::ptr; - -use libc::{c_char, c_uint}; +use crate::abi::{FnAbiLlvmExt, LlvmType}; +use crate::context::CodegenCx; +pub(crate) use crate::llvm::Type; +use crate::llvm::{Bool, False, True}; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; +use crate::{common, llvm}; impl PartialEq for Type { fn eq(&self, other: &Self) -> bool { @@ -144,7 +141,7 @@ impl<'ll> CodegenCx<'ll, '_> { } } -impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn type_i8(&self) -> &'ll Type { unsafe { llvm::LLVMInt8TypeInContext(self.llcx) } } @@ -248,7 +245,7 @@ impl Type { } } -impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> LayoutTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type { layout.llvm_type(self) } @@ -283,7 +280,7 @@ impl<'ll, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } -impl<'ll, 'tcx> TypeMembershipMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> TypeMembershipCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { fn add_type_metadata(&self, function: &'ll Value, typeid: String) { let typeid_metadata = self.typeid_metadata(typeid).unwrap(); let v = [self.const_usize(0), typeid_metadata]; diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 7be941ed749..6e429a1674a 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -1,16 +1,15 @@ -use crate::common::*; -use crate::type_::Type; +use std::fmt::Write; + use rustc_codegen_ssa::traits::*; use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths}; use rustc_middle::ty::{self, CoroutineArgsExt, Ty, TypeVisitableExt}; -use rustc_target::abi::{Abi, Align, FieldsShape}; -use rustc_target::abi::{Float, Int, Pointer}; -use rustc_target::abi::{Scalar, Size, Variants}; +use rustc_target::abi::{Abi, Align, FieldsShape, Float, Int, Pointer, Scalar, Size, Variants}; use tracing::debug; -use std::fmt::Write; +use crate::common::*; +use crate::type_::Type; fn uncached_llvm_type<'a, 'tcx>( cx: &CodegenCx<'a, 'tcx>, @@ -140,21 +139,21 @@ fn struct_llfields<'a, 'tcx>( } impl<'a, 'tcx> 
CodegenCx<'a, 'tcx> { - pub fn align_of(&self, ty: Ty<'tcx>) -> Align { + pub(crate) fn align_of(&self, ty: Ty<'tcx>) -> Align { self.layout_of(ty).align.abi } - pub fn size_of(&self, ty: Ty<'tcx>) -> Size { + pub(crate) fn size_of(&self, ty: Ty<'tcx>) -> Size { self.layout_of(ty).size } - pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { + pub(crate) fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) { let layout = self.layout_of(ty); (layout.size, layout.align.abi) } } -pub trait LayoutLlvmExt<'tcx> { +pub(crate) trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; fn is_llvm_scalar_pair(&self) -> bool; fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 220bb77d3fd..781cee81180 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -1,16 +1,15 @@ -use crate::builder::Builder; -use crate::type_::Type; -use crate::type_of::LayoutLlvmExt; -use crate::value::Value; +use rustc_codegen_ssa::common::IntPredicate; use rustc_codegen_ssa::mir::operand::OperandRef; -use rustc_codegen_ssa::{ - common::IntPredicate, - traits::{BaseTypeMethods, BuilderMethods, ConstMethods}, -}; +use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods}; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; use rustc_middle::ty::Ty; use rustc_target::abi::{Align, Endian, HasDataLayout, Size}; +use crate::builder::Builder; +use crate::type_::Type; +use crate::type_of::LayoutLlvmExt; +use crate::value::Value; + fn round_pointer_up_to_alignment<'ll>( bx: &mut Builder<'_, 'll, '_>, addr: &'ll Value, diff --git a/compiler/rustc_codegen_llvm/src/value.rs b/compiler/rustc_codegen_llvm/src/value.rs index 1338a229566..2eabac3be8c 100644 --- a/compiler/rustc_codegen_llvm/src/value.rs +++ b/compiler/rustc_codegen_llvm/src/value.rs @@ -1,10 +1,8 @@ -pub use crate::llvm::Value; +use std::hash::{Hash, Hasher}; +use std::{fmt, ptr}; use crate::llvm; - -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ptr; +pub(crate) use crate::llvm::Value; impl PartialEq for Value { fn eq(&self, other: &Self) -> bool {  | 
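The value.rs hunk above is cut off mid-function, but the pattern it belongs to — treating LLVM's opaque `Value` handles as equal only when they are the same underlying object, i.e. comparing by address — can be sketched in isolation roughly as follows. This sketch uses an ordinary `#[repr(C)]` stand-in type rather than the unstable extern types (`pub type Value;` inside `unsafe extern "C"`) that the real bindings rely on; all names here are illustrative.

use std::hash::{Hash, Hasher};
use std::ptr;

// Stand-in for an opaque FFI handle; the real bindings declare
// `pub type Value;` inside an `unsafe extern "C"` block instead.
#[repr(C)]
struct OpaqueValue(u8);

impl PartialEq for OpaqueValue {
    fn eq(&self, other: &Self) -> bool {
        // Two handles are "equal" only if they refer to the same object.
        ptr::eq(self, other)
    }
}
impl Eq for OpaqueValue {}

impl Hash for OpaqueValue {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Hash the address, keeping hashing consistent with pointer-identity equality.
        (self as *const Self).hash(state);
    }
}

fn main() {
    let a = OpaqueValue(0);
    let b = OpaqueValue(0);
    assert!(a == a);
    assert!(a != b); // distinct objects compare unequal despite identical contents
}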

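One further note on the new `CreateRangeAttr` wrapper in llvm/mod.rs: the 128-bit bounds of a `WrappingRange` are handed to `LLVMRustCreateRangeAttribute` as two 64-bit words each (low word first), after the inclusive upper bound is turned into an exclusive one with `wrapping_add(1)`. A self-contained sketch of just that word-splitting step, with an illustrative helper name:

// Split a 128-bit bound into the (low, high) 64-bit words expected by
// the *const u64 parameters of the FFI entry point.
fn to_words(bound: u128) -> [u64; 2] {
    [bound as u64, (bound >> 64) as u64]
}

fn main() {
    // An inclusive range 0..=255 becomes the half-open bounds [0, 256).
    let (start, end): (u128, u128) = (0, 255);
    let upper_exclusive = end.wrapping_add(1);
    assert_eq!(to_words(start), [0, 0]);
    assert_eq!(to_words(upper_exclusive), [256, 0]);
    // A value that does not fit in 64 bits spills into the high word.
    assert_eq!(to_words(1u128 << 64), [0, 1]);
}

Passing the bounds as plain `*const u64` arrays presumably avoids moving a 128-bit integer across the C ABI boundary directly.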