Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
29 files changed, 979 insertions, 910 deletions
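The allocator.rs hunk below replaces hand-rolled LLVMInt*TypeInContext calls with the context's type helpers (cx.type_i16(), cx.type_ptr(), ...) and routes the shim symbols through mangle_internal_symbol. A minimal standalone sketch of that pattern follows, using hypothetical Cx/Ty types rather than the real rustc_codegen_llvm ones, to show how pointer-width dispatch moves behind a helper:

    // Hedged sketch, not the real rustc types: the shape of the allocator.rs refactor,
    // where callers stop matching on pointer_width themselves and ask the context instead.
    struct Cx {
        pointer_width: u32,
    }

    #[derive(Debug, PartialEq)]
    enum Ty {
        I8,
        I16,
        I32,
        I64,
        Ptr,
    }

    impl Cx {
        // Before the change, callers matched on tcx.sess.target.pointer_width and called
        // raw LLVMIntNTypeInContext; after it, the context exposes the type helpers.
        fn type_isize(&self) -> Ty {
            match self.pointer_width {
                16 => Ty::I16,
                32 => Ty::I32,
                64 => Ty::I64,
                w => panic!("unsupported target word size: {w}"),
            }
        }
        fn type_i8(&self) -> Ty {
            Ty::I8
        }
        fn type_ptr(&self) -> Ty {
            Ty::Ptr
        }
    }

    fn main() {
        let cx = Cx { pointer_width: 64 };
        assert_eq!(cx.type_isize(), Ty::I64);
        assert_eq!(cx.type_i8(), Ty::I8);
        assert_eq!(cx.type_ptr(), Ty::Ptr);
    }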
diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 8c75125e009..71059338151 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -654,7 +654,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } } -impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { +impl AbiBuilderMethods for Builder<'_, '_, '_> { fn get_param(&mut self, index: usize) -> Self::Value { llvm::get_param(self.llfn(), index as c_uint) } diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 66723cbf882..4a78e694979 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -3,33 +3,32 @@ use rustc_ast::expand::allocator::{ ALLOCATOR_METHODS, AllocatorKind, AllocatorTy, NO_ALLOC_SHIM_IS_UNSTABLE, alloc_error_handler_name, default_fn_name, global_fn_name, }; +use rustc_codegen_ssa::traits::BaseTypeCodegenMethods as _; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::{DebugInfo, OomStrategy}; +use rustc_symbol_mangling::mangle_internal_symbol; -use crate::common::AsCCharPtr; -use crate::llvm::{self, Context, False, Module, True, Type}; -use crate::{ModuleLlvm, attributes, debuginfo}; +use crate::builder::SBuilder; +use crate::declare::declare_simple_fn; +use crate::llvm::{self, False, True, Type}; +use crate::{SimpleCx, attributes, debuginfo}; pub(crate) unsafe fn codegen( tcx: TyCtxt<'_>, - module_llvm: &mut ModuleLlvm, + cx: SimpleCx<'_>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind, ) { - let llcx = &*module_llvm.llcx; - let llmod = module_llvm.llmod(); - let usize = unsafe { - match tcx.sess.target.pointer_width { - 16 => llvm::LLVMInt16TypeInContext(llcx), - 32 => llvm::LLVMInt32TypeInContext(llcx), - 64 => llvm::LLVMInt64TypeInContext(llcx), - tws => bug!("Unsupported target word size for int: {}", tws), - } + let usize = match tcx.sess.target.pointer_width { + 16 => cx.type_i16(), + 32 => cx.type_i32(), + 64 => cx.type_i64(), + tws => bug!("Unsupported target word size for int: {}", tws), }; - let i8 = unsafe { llvm::LLVMInt8TypeInContext(llcx) }; - let i8p = unsafe { llvm::LLVMPointerTypeInContext(llcx, 0) }; + let i8 = cx.type_i8(); + let i8p = cx.type_ptr(); if kind == AllocatorKind::Default { for method in ALLOCATOR_METHODS { @@ -55,20 +54,19 @@ pub(crate) unsafe fn codegen( } }; - let from_name = global_fn_name(method.name); - let to_name = default_fn_name(method.name); + let from_name = mangle_internal_symbol(tcx, &global_fn_name(method.name)); + let to_name = mangle_internal_symbol(tcx, &default_fn_name(method.name)); - create_wrapper_function(tcx, llcx, llmod, &from_name, &to_name, &args, output, false); + create_wrapper_function(tcx, &cx, &from_name, &to_name, &args, output, false); } } // rust alloc error handler create_wrapper_function( tcx, - llcx, - llmod, - "__rust_alloc_error_handler", - alloc_error_handler_name(alloc_error_handler_kind), + &cx, + &mangle_internal_symbol(tcx, "__rust_alloc_error_handler"), + &mangle_internal_symbol(tcx, alloc_error_handler_name(alloc_error_handler_kind)), &[usize, usize], // size, align None, true, @@ -76,22 +74,22 @@ pub(crate) unsafe fn codegen( unsafe { // __rust_alloc_error_handler_should_panic - let name = OomStrategy::SYMBOL; - let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8); + let name = mangle_internal_symbol(tcx, 
OomStrategy::SYMBOL); + let ll_g = cx.declare_global(&name, i8); llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility())); let val = tcx.sess.opts.unstable_opts.oom.should_panic(); let llval = llvm::LLVMConstInt(i8, val as u64, False); llvm::set_initializer(ll_g, llval); - let name = NO_ALLOC_SHIM_IS_UNSTABLE; - let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8); + let name = mangle_internal_symbol(tcx, NO_ALLOC_SHIM_IS_UNSTABLE); + let ll_g = cx.declare_global(&name, i8); llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility())); let llval = llvm::LLVMConstInt(i8, 0, False); llvm::set_initializer(ll_g, llval); } if tcx.sess.opts.debuginfo != DebugInfo::None { - let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod); + let dbg_cx = debuginfo::CodegenUnitDebugContext::new(cx.llmod); debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx); dbg_cx.finalize(tcx.sess); } @@ -99,77 +97,64 @@ pub(crate) unsafe fn codegen( fn create_wrapper_function( tcx: TyCtxt<'_>, - llcx: &Context, - llmod: &Module, + cx: &SimpleCx<'_>, from_name: &str, to_name: &str, args: &[&Type], output: Option<&Type>, no_return: bool, ) { - unsafe { - let ty = llvm::LLVMFunctionType( - output.unwrap_or_else(|| llvm::LLVMVoidTypeInContext(llcx)), - args.as_ptr(), - args.len() as c_uint, - False, - ); - let llfn = llvm::LLVMRustGetOrInsertFunction( - llmod, - from_name.as_c_char_ptr(), - from_name.len(), - ty, - ); - let no_return = if no_return { - // -> ! DIFlagNoReturn - let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx); - attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]); - Some(no_return) - } else { - None - }; - - llvm::set_visibility(llfn, llvm::Visibility::from_generic(tcx.sess.default_visibility())); - - if tcx.sess.must_emit_unwind_tables() { - let uwtable = - attributes::uwtable_attr(llcx, tcx.sess.opts.unstable_opts.use_sync_unwind); - attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]); - } + let ty = cx.type_func(args, output.unwrap_or_else(|| cx.type_void())); + let llfn = declare_simple_fn( + &cx, + from_name, + llvm::CallConv::CCallConv, + llvm::UnnamedAddr::Global, + llvm::Visibility::from_generic(tcx.sess.default_visibility()), + ty, + ); + let no_return = if no_return { + // -> ! DIFlagNoReturn + let no_return = llvm::AttributeKind::NoReturn.create_attr(cx.llcx); + attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]); + Some(no_return) + } else { + None + }; - let callee = - llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_c_char_ptr(), to_name.len(), ty); - if let Some(no_return) = no_return { - // -> ! 
DIFlagNoReturn - attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]); - } - llvm::set_visibility(callee, llvm::Visibility::Hidden); - - let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr()); - - let llbuilder = llvm::LLVMCreateBuilderInContext(llcx); - llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb); - let args = args - .iter() - .enumerate() - .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint)) - .collect::<Vec<_>>(); - let ret = llvm::LLVMBuildCallWithOperandBundles( - llbuilder, - ty, - callee, - args.as_ptr(), - args.len() as c_uint, - [].as_ptr(), - 0 as c_uint, - c"".as_ptr(), - ); - llvm::LLVMSetTailCall(ret, True); - if output.is_some() { - llvm::LLVMBuildRet(llbuilder, ret); - } else { - llvm::LLVMBuildRetVoid(llbuilder); - } - llvm::LLVMDisposeBuilder(llbuilder); + if tcx.sess.must_emit_unwind_tables() { + let uwtable = + attributes::uwtable_attr(cx.llcx, tcx.sess.opts.unstable_opts.use_sync_unwind); + attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]); + } + + let callee = declare_simple_fn( + &cx, + to_name, + llvm::CallConv::CCallConv, + llvm::UnnamedAddr::Global, + llvm::Visibility::Hidden, + ty, + ); + if let Some(no_return) = no_return { + // -> ! DIFlagNoReturn + attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]); + } + llvm::set_visibility(callee, llvm::Visibility::Hidden); + + let llbb = unsafe { llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, c"entry".as_ptr()) }; + + let mut bx = SBuilder::build(&cx, llbb); + let args = args + .iter() + .enumerate() + .map(|(i, _)| llvm::get_param(llfn, i as c_uint)) + .collect::<Vec<_>>(); + let ret = bx.call(ty, callee, &args, None); + llvm::LLVMSetTailCall(ret, True); + if output.is_some() { + bx.ret(ret); + } else { + bx.ret_void() } } diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index be5673eddf9..88daa025740 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -1,6 +1,5 @@ use std::assert_matches::assert_matches; -use libc::{c_char, c_uint}; use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar}; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_codegen_ssa::mir::operand::OperandValue; @@ -483,12 +482,13 @@ pub(crate) fn inline_asm_call<'ll>( debug!("Asm Output Type: {:?}", output); let fty = bx.cx.type_func(&argtys, output); - unsafe { - // Ask LLVM to verify that the constraints are well-formed. - let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()); - debug!("constraint verification result: {:?}", constraints_ok); - if constraints_ok { - let v = llvm::LLVMRustInlineAsm( + // Ask LLVM to verify that the constraints are well-formed. 
+ let constraints_ok = + unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()) }; + debug!("constraint verification result: {:?}", constraints_ok); + if constraints_ok { + let v = unsafe { + llvm::LLVMRustInlineAsm( fty, asm.as_c_char_ptr(), asm.len(), @@ -498,54 +498,50 @@ pub(crate) fn inline_asm_call<'ll>( alignstack, dia, can_throw, - ); - - let call = if !labels.is_empty() { - assert!(catch_funclet.is_none()); - bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None) - } else if let Some((catch, funclet)) = catch_funclet { - bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None) - } else { - bx.call(fty, None, None, v, inputs, None, None) - }; + ) + }; - // Store mark in a metadata node so we can map LLVM errors - // back to source locations. See #17552. - let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext( - bx.llcx, - key.as_ptr().cast::<c_char>(), - key.len() as c_uint, - ); + let call = if !labels.is_empty() { + assert!(catch_funclet.is_none()); + bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None) + } else if let Some((catch, funclet)) = catch_funclet { + bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None) + } else { + bx.call(fty, None, None, v, inputs, None, None) + }; - // `srcloc` contains one 64-bit integer for each line of assembly code, - // where the lower 32 bits hold the lo byte position and the upper 32 bits - // hold the hi byte position. - let mut srcloc = vec![]; - if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 { - // LLVM inserts an extra line to add the ".intel_syntax", so add - // a dummy srcloc entry for it. - // - // Don't do this if we only have 1 line span since that may be - // due to the asm template string coming from a macro. LLVM will - // default to the first srcloc for lines that don't have an - // associated srcloc. - srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0))); - } - srcloc.extend(line_spans.iter().map(|span| { - llvm::LLVMValueAsMetadata(bx.const_u64( - u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32), - )) - })); - let md = llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()); - let md = llvm::LLVMMetadataAsValue(&bx.llcx, md); - llvm::LLVMSetMetadata(call, kind, md); + // Store mark in a metadata node so we can map LLVM errors + // back to source locations. See #17552. + let key = "srcloc"; + let kind = bx.get_md_kind_id(key); - Some(call) - } else { - // LLVM has detected an issue with our constraints, bail out - None + // `srcloc` contains one 64-bit integer for each line of assembly code, + // where the lower 32 bits hold the lo byte position and the upper 32 bits + // hold the hi byte position. + let mut srcloc = vec![]; + if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 { + // LLVM inserts an extra line to add the ".intel_syntax", so add + // a dummy srcloc entry for it. + // + // Don't do this if we only have 1 line span since that may be + // due to the asm template string coming from a macro. LLVM will + // default to the first srcloc for lines that don't have an + // associated srcloc. 
+ srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0))); } + srcloc.extend(line_spans.iter().map(|span| { + llvm::LLVMValueAsMetadata( + bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)), + ) + })); + let md = unsafe { llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()) }; + let md = bx.get_metadata_value(md); + llvm::LLVMSetMetadata(call, kind, md); + + Some(call) + } else { + // LLVM has detected an issue with our constraints, bail out + None } } @@ -939,9 +935,10 @@ fn llvm_fixup_input<'ll, 'tcx>( } bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) - if layout.size.bytes() == 8 => - { + ( + AArch64(AArch64InlineAsmRegClass::vreg_low16), + BackendRepr::SimdVector { element, count }, + ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); let vec_ty = bx.cx.type_vector(elem_ty, count); let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect(); @@ -954,7 +951,7 @@ fn llvm_fixup_input<'ll, 'tcx>( } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - BackendRepr::Vector { .. }, + BackendRepr::SimdVector { .. }, ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)), ( X86( @@ -989,7 +986,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - BackendRepr::Vector { element, count: count @ (8 | 16) }, + BackendRepr::SimdVector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } @@ -1026,7 +1023,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - BackendRepr::Vector { element, count: count @ (4 | 8) }, + BackendRepr::SimdVector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } @@ -1099,9 +1096,10 @@ fn llvm_fixup_output<'ll, 'tcx>( } value } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) - if layout.size.bytes() == 8 => - { + ( + AArch64(AArch64InlineAsmRegClass::vreg_low16), + BackendRepr::SimdVector { element, count }, + ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); let vec_ty = bx.cx.type_vector(elem_ty, count * 2); let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect(); @@ -1114,7 +1112,7 @@ fn llvm_fixup_output<'ll, 'tcx>( } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - BackendRepr::Vector { .. }, + BackendRepr::SimdVector { .. 
}, ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)), ( X86( @@ -1145,7 +1143,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - BackendRepr::Vector { element, count: count @ (8 | 16) }, + BackendRepr::SimdVector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } @@ -1182,7 +1180,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - BackendRepr::Vector { element, count: count @ (4 | 8) }, + BackendRepr::SimdVector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } @@ -1243,9 +1241,10 @@ fn llvm_fixup_output_type<'ll, 'tcx>( let count = 16 / layout.size.bytes(); cx.type_vector(elem_ty, count) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) - if layout.size.bytes() == 8 => - { + ( + AArch64(AArch64InlineAsmRegClass::vreg_low16), + BackendRepr::SimdVector { element, count }, + ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(cx, element); cx.type_vector(elem_ty, count * 2) } @@ -1256,7 +1255,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - BackendRepr::Vector { .. }, + BackendRepr::SimdVector { .. }, ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8), ( X86( @@ -1284,7 +1283,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - BackendRepr::Vector { element, count: count @ (8 | 16) }, + BackendRepr::SimdVector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } @@ -1321,7 +1320,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - BackendRepr::Vector { element, count: count @ (4 | 8) }, + BackendRepr::SimdVector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 05a2cd1c5a3..668795191a2 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::ffi::{CStr, CString}; use std::fs::File; use std::path::Path; +use std::ptr::NonNull; use std::sync::Arc; use std::{io, iter, slice}; @@ -305,11 +306,8 @@ fn fat_lto( assert!(!serialized_modules.is_empty(), "must have at least one serialized module"); let (buffer, name) = serialized_modules.remove(0); info!("no in-memory regular modules to choose from, parsing {:?}", name); - ModuleCodegen { - module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?, - name: name.into_string().unwrap(), - kind: ModuleKind::Regular, - } + let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?; + ModuleCodegen::new_regular(name.into_string().unwrap(), llvm_module) } }; { @@ -655,14 +653,14 @@ pub(crate) fn run_pass_manager( } unsafe { - write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; } if cfg!(llvm_enzyme) && enable_ad { let opt_stage = 
llvm::OptStage::FatLTO; let stage = write::AutodiffStage::PostAD; unsafe { - write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; } // This is the final IR, so people should be able to inspect the optimized autodiff output. @@ -729,6 +727,11 @@ impl ThinBuffer { ThinBuffer(buffer) } } + + pub unsafe fn from_raw_ptr(ptr: *mut llvm::ThinLTOBuffer) -> ThinBuffer { + let mut ptr = NonNull::new(ptr).unwrap(); + ThinBuffer(unsafe { ptr.as_mut() }) + } } impl ThinBufferMethods for ThinBuffer { @@ -772,11 +775,11 @@ pub(crate) unsafe fn optimize_thin_module( // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx)?; - let mut module = ModuleCodegen { - module_llvm, - name: thin_module.name().to_string(), - kind: ModuleKind::Regular, - }; + let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm); + // Given that the newly created module lacks a thinlto buffer for embedding, we need to re-add it here. + if cgcx.config(ModuleKind::Regular).embed_bitcode() { + module.thin_lto_buffer = Some(thin_module.data().to_vec()); + } { let target = &*module.module_llvm.tm; let llmod = module.module_llvm.llmod(); diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 29d6121844f..bead4c82a81 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -1,6 +1,7 @@ use std::ffi::{CStr, CString}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; +use std::ptr::null_mut; use std::sync::Arc; use std::{fs, slice, str}; @@ -15,7 +16,7 @@ use rustc_codegen_ssa::back::write::{ TargetMachineFactoryFn, }; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::{CompiledModule, ModuleCodegen}; +use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{DiagCtxtHandle, FatalError, Level}; @@ -551,6 +552,7 @@ pub(crate) unsafe fn llvm_optimize( cgcx: &CodegenContext<LlvmCodegenBackend>, dcx: DiagCtxtHandle<'_>, module: &ModuleCodegen<ModuleLlvm>, + thin_lto_buffer: Option<&mut *mut llvm::ThinLTOBuffer>, config: &ModuleConfig, opt_level: config::OptLevel, opt_stage: llvm::OptStage, @@ -584,7 +586,17 @@ pub(crate) unsafe fn llvm_optimize( vectorize_loop = config.vectorize_loop; } trace!(?unroll_loops, ?vectorize_slp, ?vectorize_loop, ?run_enzyme); - let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed(); + if thin_lto_buffer.is_some() { + assert!( + matches!( + opt_stage, + llvm::OptStage::PreLinkNoLTO + | llvm::OptStage::PreLinkFatLTO + | llvm::OptStage::PreLinkThinLTO + ), + "the bitcode for LTO can only be obtained at the pre-link stage" + ); + } let pgo_gen_path = get_pgo_gen_path(config); let pgo_use_path = get_pgo_use_path(config); let pgo_sample_use_path = get_pgo_sample_use_path(config); @@ -644,7 +656,9 @@ pub(crate) unsafe fn llvm_optimize( config.no_prepopulate_passes, config.verify_llvm_ir, config.lint_llvm_ir, - using_thin_buffers, + thin_lto_buffer, + config.emit_thin_lto, + config.emit_thin_lto_summary, config.merge_functions, unroll_loops, vectorize_slp, @@ -675,7 +689,7 @@ pub(crate) unsafe fn llvm_optimize( pub(crate) unsafe fn optimize( cgcx: 
&CodegenContext<LlvmCodegenBackend>, dcx: DiagCtxtHandle<'_>, - module: &ModuleCodegen<ModuleLlvm>, + module: &mut ModuleCodegen<ModuleLlvm>, config: &ModuleConfig, ) -> Result<(), FatalError> { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name); @@ -705,9 +719,53 @@ pub(crate) unsafe fn optimize( // Otherwise we pretend AD is already done and run the normal opt pipeline (=PostAD). let consider_ad = cfg!(llvm_enzyme) && config.autodiff.contains(&config::AutoDiff::Enable); let autodiff_stage = if consider_ad { AutodiffStage::PreAD } else { AutodiffStage::PostAD }; - return unsafe { - llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, autodiff_stage) + // The embedded bitcode is used to run LTO/ThinLTO. + // The bitcode obtained during the `codegen` phase is no longer suitable for performing LTO. + // It may have undergone LTO due to ThinLocal, so we need to obtain the embedded bitcode at + // this point. + let mut thin_lto_buffer = if (module.kind == ModuleKind::Regular + && config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)) + || config.emit_thin_lto_summary + { + Some(null_mut()) + } else { + None }; + unsafe { + llvm_optimize( + cgcx, + dcx, + module, + thin_lto_buffer.as_mut(), + config, + opt_level, + opt_stage, + autodiff_stage, + ) + }?; + if let Some(thin_lto_buffer) = thin_lto_buffer { + let thin_lto_buffer = unsafe { ThinBuffer::from_raw_ptr(thin_lto_buffer) }; + module.thin_lto_buffer = Some(thin_lto_buffer.data().to_vec()); + let bc_summary_out = + cgcx.output_filenames.temp_path(OutputType::ThinLinkBitcode, module_name); + if config.emit_thin_lto_summary + && let Some(thin_link_bitcode_filename) = bc_summary_out.file_name() + { + let summary_data = thin_lto_buffer.thin_link_data(); + cgcx.prof.artifact_size( + "llvm_bitcode_summary", + thin_link_bitcode_filename.to_string_lossy(), + summary_data.len() as u64, + ); + let _timer = cgcx.prof.generic_activity_with_arg( + "LLVM_module_codegen_emit_bitcode_summary", + &*module.name, + ); + if let Err(err) = fs::write(&bc_summary_out, summary_data) { + dcx.emit_err(WriteBytecode { path: &bc_summary_out, err }); + } + } + } } Ok(()) } @@ -760,59 +818,41 @@ pub(crate) unsafe fn codegen( // otherwise requested. 
let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); - let bc_summary_out = - cgcx.output_filenames.temp_path(OutputType::ThinLinkBitcode, module_name); let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); if config.bitcode_needed() { - let _timer = cgcx - .prof - .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module.name); - let thin = ThinBuffer::new(llmod, config.emit_thin_lto, config.emit_thin_lto_summary); - let data = thin.data(); - - if let Some(bitcode_filename) = bc_out.file_name() { - cgcx.prof.artifact_size( - "llvm_bitcode", - bitcode_filename.to_string_lossy(), - data.len() as u64, - ); - } - - if config.emit_thin_lto_summary - && let Some(thin_link_bitcode_filename) = bc_summary_out.file_name() - { - let summary_data = thin.thin_link_data(); - cgcx.prof.artifact_size( - "llvm_bitcode_summary", - thin_link_bitcode_filename.to_string_lossy(), - summary_data.len() as u64, - ); - - let _timer = cgcx.prof.generic_activity_with_arg( - "LLVM_module_codegen_emit_bitcode_summary", - &*module.name, - ); - if let Err(err) = fs::write(&bc_summary_out, summary_data) { - dcx.emit_err(WriteBytecode { path: &bc_summary_out, err }); - } - } - if config.emit_bc || config.emit_obj == EmitObj::Bitcode { + let thin = { + let _timer = cgcx.prof.generic_activity_with_arg( + "LLVM_module_codegen_make_bitcode", + &*module.name, + ); + ThinBuffer::new(llmod, config.emit_thin_lto, false) + }; + let data = thin.data(); let _timer = cgcx .prof .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name); + if let Some(bitcode_filename) = bc_out.file_name() { + cgcx.prof.artifact_size( + "llvm_bitcode", + bitcode_filename.to_string_lossy(), + data.len() as u64, + ); + } if let Err(err) = fs::write(&bc_out, data) { dcx.emit_err(WriteBytecode { path: &bc_out, err }); } } - if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { + if config.embed_bitcode() && module.kind == ModuleKind::Regular { let _timer = cgcx .prof .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name); + let thin_bc = + module.thin_lto_buffer.as_deref().expect("cannot find embedded bitcode"); unsafe { - embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data); + embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, &thin_bc); } } } diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index d35c7945bae..6bd27914dbd 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -13,10 +13,10 @@ use std::time::Instant; +use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_data_structures::small_c_str::SmallCStr; use rustc_middle::dep_graph; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; @@ -133,11 +133,7 @@ pub(crate) fn compile_codegen_unit( } } - ModuleCodegen { - name: cgu_name.to_string(), - module_llvm: llvm_module, - kind: ModuleKind::Regular, - } + ModuleCodegen::new_regular(cgu_name.to_string(), llvm_module) } (module, cost) diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 04b9ed02aab..297f104d124 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -31,7 +31,7 @@ use tracing::{debug, instrument}; use 
crate::abi::FnAbiLlvmExt; use crate::common::Funclet; -use crate::context::{CodegenCx, SimpleCx}; +use crate::context::{CodegenCx, FullCx, GenericCx, SCx}; use crate::llvm::{ self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, GEPNoWrapFlags, Metadata, True, }; @@ -41,15 +41,15 @@ use crate::value::Value; use crate::{attributes, llvm_util}; #[must_use] -pub(crate) struct GenericBuilder<'a, 'll, CX: Borrow<SimpleCx<'ll>>> { +pub(crate) struct GenericBuilder<'a, 'll, CX: Borrow<SCx<'ll>>> { pub llbuilder: &'ll mut llvm::Builder<'ll>, - pub cx: &'a CX, + pub cx: &'a GenericCx<'ll, CX>, } -pub(crate) type SBuilder<'a, 'll> = GenericBuilder<'a, 'll, SimpleCx<'ll>>; -pub(crate) type Builder<'a, 'll, 'tcx> = GenericBuilder<'a, 'll, CodegenCx<'ll, 'tcx>>; +pub(crate) type SBuilder<'a, 'll> = GenericBuilder<'a, 'll, SCx<'ll>>; +pub(crate) type Builder<'a, 'll, 'tcx> = GenericBuilder<'a, 'll, FullCx<'ll, 'tcx>>; -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> Drop for GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> Drop for GenericBuilder<'a, 'll, CX> { fn drop(&mut self) { unsafe { llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _)); @@ -58,7 +58,7 @@ impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> Drop for GenericBuilder<'a, 'll, CX> { } impl<'a, 'll> SBuilder<'a, 'll> { - fn call( + pub(crate) fn call( &mut self, llty: &'ll Type, llfn: &'ll Value, @@ -88,79 +88,36 @@ impl<'a, 'll> SBuilder<'a, 'll> { }; call } +} - fn with_scx(scx: &'a SimpleCx<'ll>) -> Self { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { + fn with_cx(scx: &'a GenericCx<'ll, CX>) -> Self { // Create a fresh builder from the simple context. - let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(scx.llcx) }; - SBuilder { llbuilder, cx: scx } + let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(scx.deref().borrow().llcx) }; + GenericBuilder { llbuilder, cx: scx } } -} -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { + pub(crate) fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) } } - fn ret_void(&mut self) { - unsafe { - llvm::LLVMBuildRetVoid(self.llbuilder); - } + pub(crate) fn ret_void(&mut self) { + llvm::LLVMBuildRetVoid(self.llbuilder); } - fn ret(&mut self, v: &'ll Value) { + pub(crate) fn ret(&mut self, v: &'ll Value) { unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } -} -impl<'a, 'll> SBuilder<'a, 'll> { - fn build(cx: &'a SimpleCx<'ll>, llbb: &'ll BasicBlock) -> SBuilder<'a, 'll> { - let bx = SBuilder::with_scx(cx); + + pub(crate) fn build(cx: &'a GenericCx<'ll, CX>, llbb: &'ll BasicBlock) -> Self { + let bx = Self::with_cx(cx); unsafe { llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb); } bx } - - fn check_call<'b>( - &mut self, - typ: &str, - fn_ty: &'ll Type, - llfn: &'ll Value, - args: &'b [&'ll Value], - ) -> Cow<'b, [&'ll Value]> { - assert!( - self.cx.type_kind(fn_ty) == TypeKind::Function, - "builder::{typ} not passed a function, but {fn_ty:?}" - ); - - let param_tys = self.cx.func_params_types(fn_ty); - - let all_args_match = iter::zip(¶m_tys, args.iter().map(|&v| self.cx.val_ty(v))) - .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); - - if all_args_match { - return Cow::Borrowed(args); - } - - let casted_args: Vec<_> = iter::zip(param_tys, args) - .enumerate() - .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = self.cx.val_ty(actual_val); - if expected_ty != actual_ty { - debug!( - "type mismatch in function call of {:?}. 
\ - Expected {:?} for param {}, got {:?}; injecting bitcast", - llfn, expected_ty, i, actual_ty - ); - self.bitcast(actual_val, expected_ty) - } else { - actual_val - } - }) - .collect(); - - Cow::Owned(casted_args) - } } /// Empty string, to be used where LLVM expects an instruction name, indicating @@ -168,17 +125,17 @@ impl<'a, 'll> SBuilder<'a, 'll> { // FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer. const UNNAMED: *const c_char = c"".as_ptr(); -impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> { - type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value; - type Metadata = <CodegenCx<'ll, 'tcx> as BackendTypes>::Metadata; - type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function; - type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock; - type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type; - type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet; - - type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope; - type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation; - type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable; +impl<'ll, CX: Borrow<SCx<'ll>>> BackendTypes for GenericBuilder<'_, 'll, CX> { + type Value = <GenericCx<'ll, CX> as BackendTypes>::Value; + type Metadata = <GenericCx<'ll, CX> as BackendTypes>::Metadata; + type Function = <GenericCx<'ll, CX> as BackendTypes>::Function; + type BasicBlock = <GenericCx<'ll, CX> as BackendTypes>::BasicBlock; + type Type = <GenericCx<'ll, CX> as BackendTypes>::Type; + type Funclet = <GenericCx<'ll, CX> as BackendTypes>::Funclet; + + type DIScope = <GenericCx<'ll, CX> as BackendTypes>::DIScope; + type DILocation = <GenericCx<'ll, CX> as BackendTypes>::DILocation; + type DIVariable = <GenericCx<'ll, CX> as BackendTypes>::DIVariable; } impl abi::HasDataLayout for Builder<'_, '_, '_> { @@ -294,9 +251,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn ret_void(&mut self) { - unsafe { - llvm::LLVMBuildRetVoid(self.llbuilder); - } + llvm::LLVMBuildRetVoid(self.llbuilder); } fn ret(&mut self, v: &'ll Value) { @@ -357,8 +312,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { // This function handles switch instructions with more than 2 targets and it needs to // emit branch weights metadata instead of using the intrinsic. // The values 1 and 2000 are the same as the values used by the `llvm.expect` intrinsic. - let cold_weight = unsafe { llvm::LLVMValueAsMetadata(self.cx.const_u32(1)) }; - let hot_weight = unsafe { llvm::LLVMValueAsMetadata(self.cx.const_u32(2000)) }; + let cold_weight = llvm::LLVMValueAsMetadata(self.cx.const_u32(1)); + let hot_weight = llvm::LLVMValueAsMetadata(self.cx.const_u32(2000)); let weight = |is_cold: bool| -> &Metadata { if is_cold { cold_weight } else { hot_weight } }; @@ -1506,26 +1461,12 @@ impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> { } impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { - fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Builder<'a, 'll, 'tcx> { - let bx = Builder::with_cx(cx); - unsafe { - llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb); - } - bx - } - - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { - // Create a fresh builder from the crate context. 
- let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) }; - Builder { llbuilder, cx } - } - pub(crate) fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } } -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { fn position_at_start(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); @@ -1555,7 +1496,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } } } -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { pub(crate) fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } } @@ -1656,9 +1597,7 @@ impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { let ret = unsafe { llvm::LLVMBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) }; ret.expect("LLVM does not have support for catchret") } -} -impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { fn check_call<'b>( &mut self, typ: &str, @@ -1673,7 +1612,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { let param_tys = self.cx.func_params_types(fn_ty); - let all_args_match = iter::zip(¶m_tys, args.iter().map(|&v| self.val_ty(v))) + let all_args_match = iter::zip(¶m_tys, args.iter().map(|&v| self.cx.val_ty(v))) .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1683,7 +1622,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { let casted_args: Vec<_> = iter::zip(param_tys, args) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = self.val_ty(actual_val); + let actual_ty = self.cx.val_ty(actual_val); if expected_ty != actual_ty { debug!( "type mismatch in function call of {:?}. 
\ @@ -1699,12 +1638,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { Cow::Owned(casted_args) } -} -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { + pub(crate) fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) } } } + impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value { let (ty, f) = self.cx.get_intrinsic(intrinsic); @@ -1724,7 +1663,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]); } } -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { pub(crate) fn phi( &mut self, ty: &'ll Type, diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs index 2c7899975e3..7cd4ee539d8 100644 --- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs +++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs @@ -3,6 +3,7 @@ use std::ptr; use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, AutoDiffItem, DiffActivity, DiffMode}; use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::back::write::ModuleConfig; +use rustc_codegen_ssa::traits::BaseTypeCodegenMethods as _; use rustc_errors::FatalError; use tracing::{debug, trace}; @@ -27,6 +28,113 @@ fn get_params(fnc: &Value) -> Vec<&Value> { } } +fn match_args_from_caller_to_enzyme<'ll>( + cx: &SimpleCx<'ll>, + args: &mut Vec<&'ll llvm::Value>, + inputs: &[DiffActivity], + outer_args: &[&'ll llvm::Value], +) { + debug!("matching autodiff arguments"); + // We now handle the issue that Rust level arguments not always match the llvm-ir level + // arguments. A slice, `&[f32]`, for example, is represented as a pointer and a length on + // llvm-ir level. The number of activities matches the number of Rust level arguments, so we + // need to match those. + // FIXME(ZuseZ4): This logic is a bit more complicated than it should be, can we simplify it + // using iterators and peek()? + let mut outer_pos: usize = 0; + let mut activity_pos = 0; + + let enzyme_const = cx.create_metadata("enzyme_const".to_string()).unwrap(); + let enzyme_out = cx.create_metadata("enzyme_out".to_string()).unwrap(); + let enzyme_dup = cx.create_metadata("enzyme_dup".to_string()).unwrap(); + let enzyme_dupnoneed = cx.create_metadata("enzyme_dupnoneed".to_string()).unwrap(); + + while activity_pos < inputs.len() { + let diff_activity = inputs[activity_pos as usize]; + // Duplicated arguments received a shadow argument, into which enzyme will write the + // gradient. + let (activity, duplicated): (&Metadata, bool) = match diff_activity { + DiffActivity::None => panic!("not a valid input activity"), + DiffActivity::Const => (enzyme_const, false), + DiffActivity::Active => (enzyme_out, false), + DiffActivity::ActiveOnly => (enzyme_out, false), + DiffActivity::Dual => (enzyme_dup, true), + DiffActivity::DualOnly => (enzyme_dupnoneed, true), + DiffActivity::Duplicated => (enzyme_dup, true), + DiffActivity::DuplicatedOnly => (enzyme_dupnoneed, true), + DiffActivity::FakeActivitySize => (enzyme_const, false), + }; + let outer_arg = outer_args[outer_pos]; + args.push(cx.get_metadata_value(activity)); + args.push(outer_arg); + if duplicated { + // We know that duplicated args by construction have a following argument, + // so this can not be out of bounds. 
+ let next_outer_arg = outer_args[outer_pos + 1]; + let next_outer_ty = cx.val_ty(next_outer_arg); + // FIXME(ZuseZ4): We should add support for Vec here too, but it's less urgent since + // vectors behind references (&Vec<T>) are already supported. Users can not pass a + // Vec by value for reverse mode, so this would only help forward mode autodiff. + let slice = { + if activity_pos + 1 >= inputs.len() { + // If there is no arg following our ptr, it also can't be a slice, + // since that would lead to a ptr, int pair. + false + } else { + let next_activity = inputs[activity_pos + 1]; + // We analyze the MIR types and add this dummy activity if we visit a slice. + next_activity == DiffActivity::FakeActivitySize + } + }; + if slice { + // A duplicated slice will have the following two outer_fn arguments: + // (..., ptr1, int1, ptr2, int2, ...). We add the following llvm-ir to our __enzyme call: + // (..., metadata! enzyme_dup, ptr, ptr, int1, ...). + // FIXME(ZuseZ4): We will upstream a safety check later which asserts that + // int2 >= int1, which means the shadow vector is large enough to store the gradient. + assert!(unsafe { + llvm::LLVMRustGetTypeKind(next_outer_ty) == llvm::TypeKind::Integer + }); + let next_outer_arg2 = outer_args[outer_pos + 2]; + let next_outer_ty2 = cx.val_ty(next_outer_arg2); + assert!(unsafe { + llvm::LLVMRustGetTypeKind(next_outer_ty2) == llvm::TypeKind::Pointer + }); + let next_outer_arg3 = outer_args[outer_pos + 3]; + let next_outer_ty3 = cx.val_ty(next_outer_arg3); + assert!(unsafe { + llvm::LLVMRustGetTypeKind(next_outer_ty3) == llvm::TypeKind::Integer + }); + args.push(next_outer_arg2); + args.push(cx.get_metadata_value(enzyme_const)); + args.push(next_outer_arg); + outer_pos += 4; + activity_pos += 2; + } else { + // A duplicated pointer will have the following two outer_fn arguments: + // (..., ptr, ptr, ...). We add the following llvm-ir to our __enzyme call: + // (..., metadata! enzyme_dup, ptr, ptr, ...). + if matches!(diff_activity, DiffActivity::Duplicated | DiffActivity::DuplicatedOnly) + { + assert!( + unsafe { llvm::LLVMRustGetTypeKind(next_outer_ty) } + == llvm::TypeKind::Pointer + ); + } + // In the case of Dual we don't have assumptions, e.g. f32 would be valid. + args.push(next_outer_arg); + outer_pos += 2; + activity_pos += 1; + } + } else { + // We do not differentiate with resprect to this argument. + // We already added the metadata and argument above, so just increase the counters. + outer_pos += 1; + activity_pos += 1; + } + } +} + /// When differentiating `fn_to_diff`, take a `outer_fn` and generate another /// function with expected naming and calling conventions[^1] which will be /// discovered by the enzyme LLVM pass and its body populated with the differentiated @@ -42,9 +150,6 @@ fn generate_enzyme_call<'ll>( outer_fn: &'ll Value, attrs: AutoDiffAttrs, ) { - let inputs = attrs.input_activity; - let output = attrs.ret_activity; - // We have to pick the name depending on whether we want forward or reverse mode autodiff. 
let mut ad_name: String = match attrs.mode { DiffMode::Forward => "__enzyme_fwddiff", @@ -131,111 +236,13 @@ fn generate_enzyme_call<'ll>( let mut args = Vec::with_capacity(num_args as usize + 1); args.push(fn_to_diff); - let enzyme_const = cx.create_metadata("enzyme_const".to_string()).unwrap(); - let enzyme_out = cx.create_metadata("enzyme_out".to_string()).unwrap(); - let enzyme_dup = cx.create_metadata("enzyme_dup".to_string()).unwrap(); - let enzyme_dupnoneed = cx.create_metadata("enzyme_dupnoneed".to_string()).unwrap(); let enzyme_primal_ret = cx.create_metadata("enzyme_primal_return".to_string()).unwrap(); - - match output { - DiffActivity::Dual => { - args.push(cx.get_metadata_value(enzyme_primal_ret)); - } - DiffActivity::Active => { - args.push(cx.get_metadata_value(enzyme_primal_ret)); - } - _ => {} + if matches!(attrs.ret_activity, DiffActivity::Dual | DiffActivity::Active) { + args.push(cx.get_metadata_value(enzyme_primal_ret)); } - debug!("matching autodiff arguments"); - // We now handle the issue that Rust level arguments not always match the llvm-ir level - // arguments. A slice, `&[f32]`, for example, is represented as a pointer and a length on - // llvm-ir level. The number of activities matches the number of Rust level arguments, so we - // need to match those. - // FIXME(ZuseZ4): This logic is a bit more complicated than it should be, can we simplify it - // using iterators and peek()? - let mut outer_pos: usize = 0; - let mut activity_pos = 0; let outer_args: Vec<&llvm::Value> = get_params(outer_fn); - while activity_pos < inputs.len() { - let diff_activity = inputs[activity_pos as usize]; - // Duplicated arguments received a shadow argument, into which enzyme will write the - // gradient. - let (activity, duplicated): (&Metadata, bool) = match diff_activity { - DiffActivity::None => panic!("not a valid input activity"), - DiffActivity::Const => (enzyme_const, false), - DiffActivity::Active => (enzyme_out, false), - DiffActivity::ActiveOnly => (enzyme_out, false), - DiffActivity::Dual => (enzyme_dup, true), - DiffActivity::DualOnly => (enzyme_dupnoneed, true), - DiffActivity::Duplicated => (enzyme_dup, true), - DiffActivity::DuplicatedOnly => (enzyme_dupnoneed, true), - DiffActivity::FakeActivitySize => (enzyme_const, false), - }; - let outer_arg = outer_args[outer_pos]; - args.push(cx.get_metadata_value(activity)); - args.push(outer_arg); - if duplicated { - // We know that duplicated args by construction have a following argument, - // so this can not be out of bounds. - let next_outer_arg = outer_args[outer_pos + 1]; - let next_outer_ty = cx.val_ty(next_outer_arg); - // FIXME(ZuseZ4): We should add support for Vec here too, but it's less urgent since - // vectors behind references (&Vec<T>) are already supported. Users can not pass a - // Vec by value for reverse mode, so this would only help forward mode autodiff. - let slice = { - if activity_pos + 1 >= inputs.len() { - // If there is no arg following our ptr, it also can't be a slice, - // since that would lead to a ptr, int pair. - false - } else { - let next_activity = inputs[activity_pos + 1]; - // We analyze the MIR types and add this dummy activity if we visit a slice. - next_activity == DiffActivity::FakeActivitySize - } - }; - if slice { - // A duplicated slice will have the following two outer_fn arguments: - // (..., ptr1, int1, ptr2, int2, ...). We add the following llvm-ir to our __enzyme call: - // (..., metadata! enzyme_dup, ptr, ptr, int1, ...). 
- // FIXME(ZuseZ4): We will upstream a safety check later which asserts that - // int2 >= int1, which means the shadow vector is large enough to store the gradient. - assert!(llvm::LLVMRustGetTypeKind(next_outer_ty) == llvm::TypeKind::Integer); - let next_outer_arg2 = outer_args[outer_pos + 2]; - let next_outer_ty2 = cx.val_ty(next_outer_arg2); - assert!(llvm::LLVMRustGetTypeKind(next_outer_ty2) == llvm::TypeKind::Pointer); - let next_outer_arg3 = outer_args[outer_pos + 3]; - let next_outer_ty3 = cx.val_ty(next_outer_arg3); - assert!(llvm::LLVMRustGetTypeKind(next_outer_ty3) == llvm::TypeKind::Integer); - args.push(next_outer_arg2); - args.push(cx.get_metadata_value(enzyme_const)); - args.push(next_outer_arg); - outer_pos += 4; - activity_pos += 2; - } else { - // A duplicated pointer will have the following two outer_fn arguments: - // (..., ptr, ptr, ...). We add the following llvm-ir to our __enzyme call: - // (..., metadata! enzyme_dup, ptr, ptr, ...). - if matches!( - diff_activity, - DiffActivity::Duplicated | DiffActivity::DuplicatedOnly - ) { - assert!( - llvm::LLVMRustGetTypeKind(next_outer_ty) == llvm::TypeKind::Pointer - ); - } - // In the case of Dual we don't have assumptions, e.g. f32 would be valid. - args.push(next_outer_arg); - outer_pos += 2; - activity_pos += 1; - } - } else { - // We do not differentiate with resprect to this argument. - // We already added the metadata and argument above, so just increase the counters. - outer_pos += 1; - activity_pos += 1; - } - } + match_args_from_caller_to_enzyme(&cx, &mut args, &attrs.input_activity, &outer_args); let call = builder.call(enzyme_ty, ad_fn, &args, None); @@ -286,7 +293,8 @@ pub(crate) fn differentiate<'ll>( } let diag_handler = cgcx.create_dcx(); - let cx = SimpleCx { llmod: module.module_llvm.llmod(), llcx: module.module_llvm.llcx }; + + let cx = SimpleCx::new(module.module_llvm.llmod(), module.module_llvm.llcx, cgcx.pointer_size); // First of all, did the user try to use autodiff without using the -Zautodiff=Enable flag? if !diff_items.is_empty() diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index 0621b893e75..457e5452ce9 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -1,5 +1,7 @@ //! Code that is useful in various codegen modules. +use std::borrow::Borrow; + use libc::{c_char, c_uint}; use rustc_abi as abi; use rustc_abi::Primitive::Pointer; @@ -18,6 +20,7 @@ use tracing::debug; use crate::consts::const_alloc_to_llvm; pub(crate) use crate::context::CodegenCx; +use crate::context::{GenericCx, SCx}; use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, True}; use crate::type_::Type; use crate::value::Value; @@ -81,7 +84,7 @@ impl<'ll> Funclet<'ll> { } } -impl<'ll> BackendTypes for CodegenCx<'ll, '_> { +impl<'ll, CX: Borrow<SCx<'ll>>> BackendTypes for GenericCx<'ll, CX> { type Value = &'ll Value; type Metadata = &'ll Metadata; // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`. 
@@ -118,7 +121,7 @@ impl<'ll> CodegenCx<'ll, '_> { } } -impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstNull(t) } } @@ -127,10 +130,6 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMGetUndef(t) } } - fn is_undef(&self, v: &'ll Value) -> bool { - unsafe { llvm::LLVMIsUndef(v) == True } - } - fn const_poison(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMGetPoison(t) } } @@ -209,28 +208,24 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn const_str(&self, s: &str) -> (&'ll Value, &'ll Value) { - let str_global = *self - .const_str_cache - .borrow_mut() - .raw_entry_mut() - .from_key(s) - .or_insert_with(|| { - let sc = self.const_bytes(s.as_bytes()); - let sym = self.generate_local_symbol_name("str"); - let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| { - bug!("symbol `{}` is already defined", sym); - }); - llvm::set_initializer(g, sc); - unsafe { - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global); - } - llvm::set_linkage(g, llvm::Linkage::InternalLinkage); - // Cast to default address space if globals are in a different addrspace - let g = self.const_pointercast(g, self.type_ptr()); - (s.to_owned(), g) - }) - .1; + let mut const_str_cache = self.const_str_cache.borrow_mut(); + let str_global = const_str_cache.get(s).copied().unwrap_or_else(|| { + let sc = self.const_bytes(s.as_bytes()); + let sym = self.generate_local_symbol_name("str"); + let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| { + bug!("symbol `{}` is already defined", sym); + }); + llvm::set_initializer(g, sc); + unsafe { + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global); + } + llvm::set_linkage(g, llvm::Linkage::InternalLinkage); + // Cast to default address space if globals are in a different addrspace + let g = self.const_pointercast(g, self.type_ptr()); + const_str_cache.insert(s.to_owned(), g); + g + }); let len = s.len(); (str_global, self.const_usize(len as u64)) } @@ -350,7 +345,7 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value { + fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value { const_alloc_to_llvm(self, alloc, /*static*/ false) } diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 330e8a8f406..62fa2884e0f 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -5,6 +5,7 @@ use rustc_abi::{ }; use rustc_codegen_ssa::common; use rustc_codegen_ssa::traits::*; +use rustc_hir::LangItem; use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; @@ -12,11 +13,10 @@ use rustc_middle::mir::interpret::{ Allocation, ConstAllocation, ErrorHandled, InitChunk, Pointer, Scalar as InterpScalar, read_target_uint, }; -use rustc_middle::mir::mono::MonoItem; -use rustc_middle::ty::Instance; +use rustc_middle::mir::mono::{Linkage, MonoItem}; use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf}; +use rustc_middle::ty::{self, Instance}; use rustc_middle::{bug, span_bug}; -use rustc_session::config::Lto; use tracing::{debug, instrument, trace}; use 
crate::common::{AsCCharPtr, CodegenCx}; @@ -172,8 +172,27 @@ fn check_and_apply_linkage<'ll, 'tcx>( if let Some(linkage) = attrs.import_linkage { debug!("get_static: sym={} linkage={:?}", sym, linkage); - // Declare a symbol `foo` with the desired linkage. - let g1 = cx.declare_global(sym, cx.type_i8()); + // Declare a symbol `foo`. If `foo` is an extern_weak symbol, we declare + // an extern_weak function, otherwise a global with the desired linkage. + let g1 = if matches!(attrs.import_linkage, Some(Linkage::ExternalWeak)) { + // An `extern_weak` function is represented as an `Option<unsafe extern ...>`, + // we extract the function signature and declare it as an extern_weak function + // instead of an extern_weak i8. + let instance = Instance::mono(cx.tcx, def_id); + if let ty::Adt(struct_def, args) = instance.ty(cx.tcx, cx.typing_env()).kind() + && cx.tcx.is_lang_item(struct_def.did(), LangItem::Option) + && let ty::FnPtr(sig, header) = args.type_at(0).kind() + { + let fn_sig = sig.with(*header); + + let fn_abi = cx.fn_abi_of_fn_ptr(fn_sig, ty::List::empty()); + cx.declare_fn(sym, &fn_abi, None) + } else { + cx.declare_global(sym, cx.type_i8()) + } + } else { + cx.declare_global(sym, cx.type_i8()) + }; llvm::set_linkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which @@ -344,11 +363,11 @@ impl<'ll> CodegenCx<'ll, '_> { // Local definitions can never be imported, so we must not apply // the DLLImport annotation. && !dso_local - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when linker plugin based LTO is enabled. - && !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() - && self.tcx.sess.lto() != Lto::Thin; + // Linker plugin ThinLTO doesn't create the self-dllimport Rust uses for rlibs + // as the code generation happens out of process. Instead we assume static linkage + // and disallow dynamic linking when linker plugin based LTO is enabled. + // Regular in-process ThinLTO doesn't need this workaround. + && !self.tcx.sess.opts.cg.linker_plugin_lto.enabled(); // If this assertion triggers, there's something wrong with commandline // argument validation. 
@@ -490,7 +509,7 @@ impl<'ll> CodegenCx<'ll, '_> { llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len()); let data = [section, alloc]; let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()); - let val = llvm::LLVMMetadataAsValue(self.llcx, meta); + let val = self.get_metadata_value(meta); llvm::LLVMAddNamedMetadataOperand( self.llmod, c"wasm.custom_sections".as_ptr(), diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index e367cf90eee..f7b096ff976 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -1,13 +1,13 @@ use std::borrow::Borrow; use std::cell::{Cell, RefCell}; use std::ffi::{CStr, c_char, c_uint}; +use std::marker::PhantomData; use std::ops::Deref; use std::str; -use rustc_abi::{HasDataLayout, TargetDataLayout, VariantIdx}; +use rustc_abi::{HasDataLayout, Size, TargetDataLayout, VariantIdx}; use rustc_codegen_ssa::back::versioned_llvm_target; use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh}; -use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::errors as ssa_errors; use rustc_codegen_ssa::traits::*; use rustc_data_structures::base_n::{ALPHANUMERIC_ONLY, ToBaseN}; @@ -27,14 +27,15 @@ use rustc_session::config::{ }; use rustc_span::source_map::Spanned; use rustc_span::{DUMMY_SP, Span}; +use rustc_symbol_mangling::mangle_internal_symbol; use rustc_target::spec::{HasTargetSpec, RelocModel, SmallDataThresholdSupport, Target, TlsModel}; use smallvec::SmallVec; use crate::back::write::to_llvm_code_model; use crate::callee::get_fn; -use crate::common::{self, AsCCharPtr}; +use crate::common::AsCCharPtr; use crate::debuginfo::metadata::apply_vcall_visibility_metadata; -use crate::llvm::{Metadata, MetadataType}; +use crate::llvm::Metadata; use crate::type_::Type; use crate::value::Value; use crate::{attributes, coverageinfo, debuginfo, llvm, llvm_util}; @@ -43,18 +44,19 @@ use crate::{attributes, coverageinfo, debuginfo, llvm, llvm_util}; /// However, there are various cx related functions which we want to be available to the builder and /// other compiler pieces. Here we define a small subset which has enough information and can be /// moved around more freely. -pub(crate) struct SimpleCx<'ll> { +pub(crate) struct SCx<'ll> { pub llmod: &'ll llvm::Module, pub llcx: &'ll llvm::Context, + pub isize_ty: &'ll Type, } -impl<'ll> Borrow<SimpleCx<'ll>> for CodegenCx<'ll, '_> { - fn borrow(&self) -> &SimpleCx<'ll> { +impl<'ll> Borrow<SCx<'ll>> for FullCx<'ll, '_> { + fn borrow(&self) -> &SCx<'ll> { &self.scx } } -impl<'ll, 'tcx> Deref for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> Deref for FullCx<'ll, 'tcx> { type Target = SimpleCx<'ll>; #[inline] @@ -63,10 +65,25 @@ impl<'ll, 'tcx> Deref for CodegenCx<'ll, 'tcx> { } } +pub(crate) struct GenericCx<'ll, T: Borrow<SCx<'ll>>>(T, PhantomData<SCx<'ll>>); + +impl<'ll, T: Borrow<SCx<'ll>>> Deref for GenericCx<'ll, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +pub(crate) type SimpleCx<'ll> = GenericCx<'ll, SCx<'ll>>; + /// There is one `CodegenCx` per codegen unit. Each one has its own LLVM /// `llvm::Context` so that several codegen units may be processed in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. 
-pub(crate) struct CodegenCx<'ll, 'tcx> { +pub(crate) type CodegenCx<'ll, 'tcx> = GenericCx<'ll, FullCx<'ll, 'tcx>>; + +pub(crate) struct FullCx<'ll, 'tcx> { pub tcx: TyCtxt<'tcx>, pub scx: SimpleCx<'ll>, pub use_dll_storage_attrs: bool, @@ -104,8 +121,6 @@ pub(crate) struct CodegenCx<'ll, 'tcx> { /// Mapping of scalar types to llvm types. pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>, - pub isize_ty: &'ll Type, - /// Extra per-CGU codegen state needed when coverage instrumentation is enabled. pub coverage_cx: Option<coverageinfo::CguCoverageContext<'ll, 'tcx>>, pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>, @@ -579,33 +594,33 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { None }; - let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits()); - - CodegenCx { - tcx, - scx: SimpleCx { llcx, llmod }, - use_dll_storage_attrs, - tls_model, - codegen_unit, - instances: Default::default(), - vtables: Default::default(), - const_str_cache: Default::default(), - const_globals: Default::default(), - statics_to_rauw: RefCell::new(Vec::new()), - used_statics: RefCell::new(Vec::new()), - compiler_used_statics: RefCell::new(Vec::new()), - type_lowering: Default::default(), - scalar_lltypes: Default::default(), - isize_ty, - coverage_cx, - dbg_cx, - eh_personality: Cell::new(None), - eh_catch_typeinfo: Cell::new(None), - rust_try_fn: Cell::new(None), - intrinsics: Default::default(), - local_gen_sym_counter: Cell::new(0), - renamed_statics: Default::default(), - } + GenericCx( + FullCx { + tcx, + scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size), + use_dll_storage_attrs, + tls_model, + codegen_unit, + instances: Default::default(), + vtables: Default::default(), + const_str_cache: Default::default(), + const_globals: Default::default(), + statics_to_rauw: RefCell::new(Vec::new()), + used_statics: RefCell::new(Vec::new()), + compiler_used_statics: RefCell::new(Vec::new()), + type_lowering: Default::default(), + scalar_lltypes: Default::default(), + coverage_cx, + dbg_cx, + eh_personality: Cell::new(None), + eh_catch_typeinfo: Cell::new(None), + rust_try_fn: Cell::new(None), + intrinsics: Default::default(), + local_gen_sym_counter: Cell::new(0), + renamed_statics: Default::default(), + }, + PhantomData, + ) } pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> { @@ -628,24 +643,32 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { llvm::set_section(g, c"llvm.metadata"); } } + impl<'ll> SimpleCx<'ll> { - pub(crate) fn val_ty(&self, v: &'ll Value) -> &'ll Type { - common::val_ty(v) + pub(crate) fn new( + llmod: &'ll llvm::Module, + llcx: &'ll llvm::Context, + pointer_size: Size, + ) -> Self { + let isize_ty = llvm::Type::ix_llcx(llcx, pointer_size.bits()); + Self(SCx { llmod, llcx, isize_ty }, PhantomData) } +} +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { pub(crate) fn get_metadata_value(&self, metadata: &'ll Metadata) -> &'ll Value { - unsafe { llvm::LLVMMetadataAsValue(self.llcx, metadata) } + llvm::LLVMMetadataAsValue(self.llcx(), metadata) } pub(crate) fn get_function(&self, name: &str) -> Option<&'ll Value> { let name = SmallCStr::new(name); - unsafe { llvm::LLVMGetNamedFunction(self.llmod, name.as_ptr()) } + unsafe { llvm::LLVMGetNamedFunction((**self).borrow().llmod, name.as_ptr()) } } - pub(crate) fn get_md_kind_id(&self, name: &str) -> u32 { + pub(crate) fn get_md_kind_id(&self, name: &str) -> llvm::MetadataKindId { unsafe { llvm::LLVMGetMDKindIDInContext( - self.llcx, + self.llcx(), name.as_ptr() as *const c_char, 
name.len() as c_uint, ) @@ -654,13 +677,9 @@ impl<'ll> SimpleCx<'ll> { pub(crate) fn create_metadata(&self, name: String) -> Option<&'ll Metadata> { Some(unsafe { - llvm::LLVMMDStringInContext2(self.llcx, name.as_ptr() as *const c_char, name.len()) + llvm::LLVMMDStringInContext2(self.llcx(), name.as_ptr() as *const c_char, name.len()) }) } - - pub(crate) fn type_kind(&self, ty: &'ll Type) -> TypeKind { - unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() } - } } impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -1193,7 +1212,7 @@ impl<'ll> CodegenCx<'ll, '_> { Some(def_id) => self.get_static(def_id), _ => { let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false); - self.declare_global("rust_eh_catch_typeinfo", ty) + self.declare_global(&mangle_internal_symbol(self.tcx, "rust_eh_catch_typeinfo"), ty) } }; self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo)); @@ -1215,27 +1234,18 @@ impl CodegenCx<'_, '_> { name.push_str(&(idx as u64).to_base(ALPHANUMERIC_ONLY)); name } - - /// A wrapper for [`llvm::LLVMSetMetadata`], but it takes `Metadata` as a parameter instead of `Value`. - pub(crate) fn set_metadata<'a>(&self, val: &'a Value, kind_id: MetadataType, md: &'a Metadata) { - unsafe { - let node = llvm::LLVMMetadataAsValue(&self.llcx, md); - llvm::LLVMSetMetadata(val, kind_id as c_uint, node); - } - } } -// This is a duplication of the set_metadata function above. However, so far it's the only one -// shared between both contexts, so it doesn't seem worth it to make the Cx generic like we did it -// for the Builder. -impl SimpleCx<'_> { - #[allow(unused)] +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { /// A wrapper for [`llvm::LLVMSetMetadata`], but it takes `Metadata` as a parameter instead of `Value`. - pub(crate) fn set_metadata<'a>(&self, val: &'a Value, kind_id: MetadataType, md: &'a Metadata) { - unsafe { - let node = llvm::LLVMMetadataAsValue(&self.llcx, md); - llvm::LLVMSetMetadata(val, kind_id as c_uint, node); - } + pub(crate) fn set_metadata<'a>( + &self, + val: &'a Value, + kind_id: impl Into<llvm::MetadataKindId>, + md: &'ll Metadata, + ) { + let node = self.get_metadata_value(md); + llvm::LLVMSetMetadata(val, kind_id.into(), node); } } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs index b617f4d37f5..f6000e72840 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/ffi.rs @@ -146,6 +146,7 @@ pub(crate) struct CoverageSpan { #[derive(Clone, Debug, Default)] pub(crate) struct Regions { pub(crate) code_regions: Vec<CodeRegion>, + pub(crate) expansion_regions: Vec<ExpansionRegion>, pub(crate) branch_regions: Vec<BranchRegion>, pub(crate) mcdc_branch_regions: Vec<MCDCBranchRegion>, pub(crate) mcdc_decision_regions: Vec<MCDCDecisionRegion>, @@ -154,10 +155,16 @@ pub(crate) struct Regions { impl Regions { /// Returns true if none of this structure's tables contain any regions. 
pub(crate) fn has_no_regions(&self) -> bool { - let Self { code_regions, branch_regions, mcdc_branch_regions, mcdc_decision_regions } = - self; + let Self { + code_regions, + expansion_regions, + branch_regions, + mcdc_branch_regions, + mcdc_decision_regions, + } = self; code_regions.is_empty() + && expansion_regions.is_empty() && branch_regions.is_empty() && mcdc_branch_regions.is_empty() && mcdc_decision_regions.is_empty() @@ -172,6 +179,14 @@ pub(crate) struct CodeRegion { pub(crate) counter: Counter, } +/// Must match the layout of `LLVMRustCoverageExpansionRegion`. +#[derive(Clone, Debug)] +#[repr(C)] +pub(crate) struct ExpansionRegion { + pub(crate) cov_span: CoverageSpan, + pub(crate) expanded_file_id: u32, +} + /// Must match the layout of `LLVMRustCoverageBranchRegion`. #[derive(Clone, Debug)] #[repr(C)] diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs index 2cd7fa3225a..907d6d41a1f 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/llvm_cov.rs @@ -63,8 +63,18 @@ pub(crate) fn write_function_mappings_to_buffer( expressions: &[ffi::CounterExpression], regions: &ffi::Regions, ) -> Vec<u8> { - let ffi::Regions { code_regions, branch_regions, mcdc_branch_regions, mcdc_decision_regions } = - regions; + let ffi::Regions { + code_regions, + expansion_regions, + branch_regions, + mcdc_branch_regions, + mcdc_decision_regions, + } = regions; + + // SAFETY: + // - All types are FFI-compatible and have matching representations in Rust/C++. + // - For pointer/length pairs, the pointer and length come from the same vector or slice. + // - C++ code does not retain any pointers after the call returns. llvm::build_byte_buffer(|buffer| unsafe { llvm::LLVMRustCoverageWriteFunctionMappingsToBuffer( virtual_file_mapping.as_ptr(), @@ -73,6 +83,8 @@ pub(crate) fn write_function_mappings_to_buffer( expressions.len(), code_regions.as_ptr(), code_regions.len(), + expansion_regions.as_ptr(), + expansion_regions.len(), branch_regions.as_ptr(), branch_regions.len(), mcdc_branch_regions.as_ptr(), diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs index c53ea6d4666..048e1988c32 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs @@ -8,7 +8,7 @@ use std::ffi::CString; use rustc_abi::Align; use rustc_codegen_ssa::traits::{ - BaseTypeCodegenMethods, ConstCodegenMethods, StaticCodegenMethods, + BaseTypeCodegenMethods as _, ConstCodegenMethods, StaticCodegenMethods, }; use rustc_middle::mir::coverage::{ BasicCoverageBlock, CovTerm, CoverageIdsInfo, Expression, FunctionCoverageInfo, Mapping, @@ -105,9 +105,14 @@ fn fill_region_tables<'tcx>( ids_info: &'tcx CoverageIdsInfo, covfun: &mut CovfunRecord<'tcx>, ) { - // Currently a function's mappings must all be in the same file as its body span. + // Currently a function's mappings must all be in the same file, so use the + // first mapping's span to determine the file. 
let source_map = tcx.sess.source_map(); - let source_file = source_map.lookup_source_file(fn_cov_info.body_span.lo()); + let Some(first_span) = (try { fn_cov_info.mappings.first()?.span }) else { + debug_assert!(false, "function has no mappings: {:?}", covfun.mangled_function_name); + return; + }; + let source_file = source_map.lookup_source_file(first_span.lo()); // Look up the global file ID for that file. let global_file_id = global_file_table.global_file_id_for_file(&source_file); @@ -115,13 +120,22 @@ fn fill_region_tables<'tcx>( // Associate that global file ID with a local file ID for this function. let local_file_id = covfun.virtual_file_mapping.local_id_for_global(global_file_id); - let ffi::Regions { code_regions, branch_regions, mcdc_branch_regions, mcdc_decision_regions } = - &mut covfun.regions; - - let make_cov_span = |span: Span| { - spans::make_coverage_span(local_file_id, source_map, fn_cov_info, &source_file, span) - }; + // In rare cases, _all_ of a function's spans are discarded, and coverage + // codegen needs to handle that gracefully to avoid #133606. + // It's hard for tests to trigger this organically, so instead we set + // `-Zcoverage-options=discard-all-spans-in-codegen` to force it to occur. let discard_all = tcx.sess.coverage_discard_all_spans_in_codegen(); + let make_coords = |span: Span| { + if discard_all { None } else { spans::make_coords(source_map, &source_file, span) } + }; + + let ffi::Regions { + code_regions, + expansion_regions: _, // FIXME(Zalathar): Fill out support for expansion regions + branch_regions, + mcdc_branch_regions, + mcdc_decision_regions, + } = &mut covfun.regions; // For each counter/region pair in this function+file, convert it to a // form suitable for FFI. @@ -136,17 +150,8 @@ fn fill_region_tables<'tcx>( ffi::Counter::from_term(term) }; - // Convert the `Span` into coordinates that we can pass to LLVM, or - // discard the span if conversion fails. In rare, cases _all_ of a - // function's spans are discarded, and the rest of coverage codegen - // needs to handle that gracefully to avoid a repeat of #133606. - // We don't have a good test case for triggering that organically, so - // instead we set `-Zcoverage-options=discard-all-spans-in-codegen` - // to force it to occur. - let Some(cov_span) = make_cov_span(span) else { continue }; - if discard_all { - continue; - } + let Some(coords) = make_coords(span) else { continue }; + let cov_span = coords.make_coverage_span(local_file_id); match *kind { MappingKind::Code { bcb } => { diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs index 6d1d91340c2..39a59560c9d 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/spans.rs @@ -1,4 +1,3 @@ -use rustc_middle::mir::coverage::FunctionCoverageInfo; use rustc_span::source_map::SourceMap; use rustc_span::{BytePos, Pos, SourceFile, Span}; use tracing::debug; @@ -6,24 +5,41 @@ use tracing::debug; use crate::coverageinfo::ffi; use crate::coverageinfo::mapgen::LocalFileId; +/// Line and byte-column coordinates of a source code span within some file. +/// The file itself must be tracked separately. +#[derive(Clone, Copy, Debug)] +pub(crate) struct Coords { + /// 1-based starting line of the source code span. + pub(crate) start_line: u32, + /// 1-based starting column (in bytes) of the source code span. + pub(crate) start_col: u32, + /// 1-based ending line of the source code span. 
+ pub(crate) end_line: u32, + /// 1-based ending column (in bytes) of the source code span. High bit must be unset. + pub(crate) end_col: u32, +} + +impl Coords { + /// Attaches a local file ID to these coordinates to produce an `ffi::CoverageSpan`. + pub(crate) fn make_coverage_span(&self, local_file_id: LocalFileId) -> ffi::CoverageSpan { + let &Self { start_line, start_col, end_line, end_col } = self; + let file_id = local_file_id.as_u32(); + ffi::CoverageSpan { file_id, start_line, start_col, end_line, end_col } + } +} + /// Converts the span into its start line and column, and end line and column. /// /// Line numbers and column numbers are 1-based. Unlike most column numbers emitted by /// the compiler, these column numbers are denoted in **bytes**, because that's what /// LLVM's `llvm-cov` tool expects to see in coverage maps. /// -/// Returns `None` if the conversion failed for some reason. This shouldn't happen, +/// Returns `None` if the conversion failed for some reason. This should be uncommon, /// but it's hard to rule out entirely (especially in the presence of complex macros /// or other expansions), and if it does happen then skipping a span or function is /// better than an ICE or `llvm-cov` failure that the user might have no way to avoid. -pub(crate) fn make_coverage_span( - file_id: LocalFileId, - source_map: &SourceMap, - fn_cov_info: &FunctionCoverageInfo, - file: &SourceFile, - span: Span, -) -> Option<ffi::CoverageSpan> { - let span = ensure_non_empty_span(source_map, fn_cov_info, span)?; +pub(crate) fn make_coords(source_map: &SourceMap, file: &SourceFile, span: Span) -> Option<Coords> { + let span = ensure_non_empty_span(source_map, span)?; let lo = span.lo(); let hi = span.hi(); @@ -46,8 +62,7 @@ pub(crate) fn make_coverage_span( start_line = source_map.doctest_offset_line(&file.name, start_line); end_line = source_map.doctest_offset_line(&file.name, end_line); - check_coverage_span(ffi::CoverageSpan { - file_id: file_id.as_u32(), + check_coords(Coords { start_line: start_line as u32, start_col: start_col as u32, end_line: end_line as u32, @@ -55,36 +70,22 @@ pub(crate) fn make_coverage_span( }) } -fn ensure_non_empty_span( - source_map: &SourceMap, - fn_cov_info: &FunctionCoverageInfo, - span: Span, -) -> Option<Span> { +fn ensure_non_empty_span(source_map: &SourceMap, span: Span) -> Option<Span> { if !span.is_empty() { return Some(span); } - let lo = span.lo(); - let hi = span.hi(); - - // The span is empty, so try to expand it to cover an adjacent '{' or '}', - // but only within the bounds of the body span. - let try_next = hi < fn_cov_info.body_span.hi(); - let try_prev = fn_cov_info.body_span.lo() < lo; - if !(try_next || try_prev) { - return None; - } - + // The span is empty, so try to enlarge it to cover an adjacent '{' or '}'. source_map .span_to_source(span, |src, start, end| try { // Adjusting span endpoints by `BytePos(1)` is normally a bug, // but in this case we have specifically checked that the character // we're skipping over is one of two specific ASCII characters, so // adjusting by exactly 1 byte is correct. 
- if try_next && src.as_bytes()[end] == b'{' { - Some(span.with_hi(hi + BytePos(1))) - } else if try_prev && src.as_bytes()[start - 1] == b'}' { - Some(span.with_lo(lo - BytePos(1))) + if src.as_bytes().get(end).copied() == Some(b'{') { + Some(span.with_hi(span.hi() + BytePos(1))) + } else if start > 0 && src.as_bytes()[start - 1] == b'}' { + Some(span.with_lo(span.lo() - BytePos(1))) } else { None } @@ -96,8 +97,8 @@ fn ensure_non_empty_span( /// it will immediately exit with a fatal error. To prevent that from happening, /// discard regions that are improperly ordered, or might be interpreted in a /// way that makes them improperly ordered. -fn check_coverage_span(cov_span: ffi::CoverageSpan) -> Option<ffi::CoverageSpan> { - let ffi::CoverageSpan { file_id: _, start_line, start_col, end_line, end_col } = cov_span; +fn check_coords(coords: Coords) -> Option<Coords> { + let Coords { start_line, start_col, end_line, end_col } = coords; // Line/column coordinates are supposed to be 1-based. If we ever emit // coordinates of 0, `llvm-cov` might misinterpret them. @@ -110,17 +111,17 @@ fn check_coverage_span(cov_span: ffi::CoverageSpan) -> Option<ffi::CoverageSpan> let is_ordered = (start_line, start_col) <= (end_line, end_col); if all_nonzero && end_col_has_high_bit_unset && is_ordered { - Some(cov_span) + Some(coords) } else { debug!( - ?cov_span, + ?coords, ?all_nonzero, ?end_col_has_high_bit_unset, ?is_ordered, "Skipping source region that would be misinterpreted or rejected by LLVM" ); // If this happens in a debug build, ICE to make it easier to notice. - debug_assert!(false, "Improper source region: {cov_span:?}"); + debug_assert!(false, "Improper source region: {coords:?}"); None } } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 98d59f5a8ae..2eaaf127e41 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -2,6 +2,7 @@ use std::borrow::Cow; use std::fmt::{self, Write}; use std::hash::{Hash, Hasher}; use std::path::{Path, PathBuf}; +use std::sync::Arc; use std::{iter, ptr}; use libc::{c_char, c_longlong, c_uint}; @@ -38,8 +39,8 @@ use crate::debuginfo::metadata::type_map::build_type_with_children; use crate::debuginfo::utils::{WidePtrKind, wide_pointer_kind}; use crate::llvm; use crate::llvm::debuginfo::{ - DIDescriptor, DIFile, DIFlags, DILexicalBlock, DIScope, DIType, DebugEmissionKind, - DebugNameTableKind, + DIBasicType, DIBuilder, DICompositeType, DIDescriptor, DIFile, DIFlags, DILexicalBlock, + DIScope, DIType, DebugEmissionKind, DebugNameTableKind, }; use crate::value::Value; @@ -68,7 +69,8 @@ pub(super) const UNKNOWN_COLUMN_NUMBER: c_uint = 0; const NO_SCOPE_METADATA: Option<&DIScope> = None; /// A function that returns an empty list of generic parameter debuginfo nodes. -const NO_GENERICS: for<'ll> fn(&CodegenCx<'ll, '_>) -> SmallVec<&'ll DIType> = |_| SmallVec::new(); +const NO_GENERICS: for<'ll> fn(&CodegenCx<'ll, '_>) -> SmallVec<Option<&'ll DIType>> = + |_| SmallVec::new(); // SmallVec is used quite a bit in this module, so create a shorthand. // The actual number of elements is not so important. 
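As an aside, a self-contained sketch (toy types, not the compiler's own) of the shape of the Coords/CoverageSpan split introduced in spans.rs above: line/column coordinates stay file-agnostic, and the local file ID is attached only at the point where the FFI record is built.

#[derive(Clone, Copy, Debug)]
struct Coords { start_line: u32, start_col: u32, end_line: u32, end_col: u32 }

#[derive(Debug)]
struct CoverageSpan { file_id: u32, start_line: u32, start_col: u32, end_line: u32, end_col: u32 }

impl Coords {
    // Mirrors Coords::make_coverage_span: attach a file ID without recomputing coordinates.
    fn make_coverage_span(&self, file_id: u32) -> CoverageSpan {
        let &Coords { start_line, start_col, end_line, end_col } = self;
        CoverageSpan { file_id, start_line, start_col, end_line, end_col }
    }
}

fn main() {
    let coords = Coords { start_line: 3, start_col: 5, end_line: 3, end_col: 20 };
    println!("{:?}", coords.make_coverage_span(0));
}
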
@@ -243,7 +245,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>( cx, owner, addr_field_name, - (addr_field.size, addr_field.align.abi), + addr_field, layout.fields.offset(WIDE_PTR_ADDR), DIFlags::FlagZero, data_ptr_type_di_node, @@ -253,7 +255,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>( cx, owner, extra_field_name, - (extra_field.size, extra_field.align.abi), + extra_field, layout.fields.offset(WIDE_PTR_EXTRA), DIFlags::FlagZero, type_di_node(cx, extra_field.ty), @@ -311,12 +313,7 @@ fn build_subroutine_type_di_node<'ll, 'tcx>( debug_context(cx).type_map.unique_id_to_di_node.borrow_mut().remove(&unique_type_id); - let fn_di_node = unsafe { - llvm::LLVMRustDIBuilderCreateSubroutineType( - DIB(cx), - create_DIArray(DIB(cx), &signature_di_nodes[..]), - ) - }; + let fn_di_node = create_subroutine_type(cx, create_DIArray(DIB(cx), &signature_di_nodes[..])); // This is actually a function pointer, so wrap it in pointer DI. let name = compute_debuginfo_type_name(cx.tcx, fn_ty, false); @@ -340,6 +337,13 @@ fn build_subroutine_type_di_node<'ll, 'tcx>( DINodeCreationResult::new(di_node, false) } +pub(super) fn create_subroutine_type<'ll>( + cx: &CodegenCx<'ll, '_>, + signature: &'ll DICompositeType, +) -> &'ll DICompositeType { + unsafe { llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), signature) } +} + /// Create debuginfo for `dyn SomeTrait` types. Currently these are empty structs /// we with the correct type name (e.g. "dyn SomeTrait<Foo, Item=u32> + Sync"). fn build_dyn_type_di_node<'ll, 'tcx>( @@ -487,26 +491,22 @@ pub(crate) fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> // FIXME(mw): Cache this via a regular UniqueTypeId instead of an extra field in the debug context. fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll DIType { *debug_context(cx).recursion_marker_type.get_or_init(move || { - unsafe { - // The choice of type here is pretty arbitrary - - // anything reading the debuginfo for a recursive - // type is going to see *something* weird - the only - // question is what exactly it will see. - // - // FIXME: the name `<recur_type>` does not fit the naming scheme - // of other types. - // - // FIXME: it might make sense to use an actual pointer type here - // so that debuggers can show the address. - let name = "<recur_type>"; - llvm::LLVMRustDIBuilderCreateBasicType( - DIB(cx), - name.as_c_char_ptr(), - name.len(), - cx.tcx.data_layout.pointer_size.bits(), - dwarf_const::DW_ATE_unsigned, - ) - } + // The choice of type here is pretty arbitrary - + // anything reading the debuginfo for a recursive + // type is going to see *something* weird - the only + // question is what exactly it will see. + // + // FIXME: the name `<recur_type>` does not fit the naming scheme + // of other types. + // + // FIXME: it might make sense to use an actual pointer type here + // so that debuggers can show the address. 
+ create_basic_type( + cx, + "<recur_type>", + cx.tcx.data_layout.pointer_size, + dwarf_const::DW_ATE_unsigned, + ) }) } @@ -620,42 +620,38 @@ pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFi let source = cx.sess().opts.unstable_opts.embed_source.then_some(()).and(source_file.src.as_ref()); - unsafe { - llvm::LLVMRustDIBuilderCreateFile( - DIB(cx), - file_name.as_c_char_ptr(), - file_name.len(), - directory.as_c_char_ptr(), - directory.len(), - hash_kind, - hash_value.as_c_char_ptr(), - hash_value.len(), - source.map_or(ptr::null(), |x| x.as_c_char_ptr()), - source.map_or(0, |x| x.len()), - ) - } + create_file(DIB(cx), &file_name, &directory, &hash_value, hash_kind, source) } } fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { - debug_context(cx).created_files.borrow_mut().entry(None).or_insert_with(|| unsafe { - let file_name = "<unknown>"; - let directory = ""; - let hash_value = ""; + debug_context(cx).created_files.borrow_mut().entry(None).or_insert_with(|| { + create_file(DIB(cx), "<unknown>", "", "", llvm::ChecksumKind::None, None) + }) +} +fn create_file<'ll>( + builder: &DIBuilder<'ll>, + file_name: &str, + directory: &str, + hash_value: &str, + hash_kind: llvm::ChecksumKind, + source: Option<&Arc<String>>, +) -> &'ll DIFile { + unsafe { llvm::LLVMRustDIBuilderCreateFile( - DIB(cx), + builder, file_name.as_c_char_ptr(), file_name.len(), directory.as_c_char_ptr(), directory.len(), - llvm::ChecksumKind::None, + hash_kind, hash_value.as_c_char_ptr(), hash_value.len(), - ptr::null(), - 0, + source.map_or(ptr::null(), |x| x.as_c_char_ptr()), + source.map_or(0, |x| x.len()), ) - }) + } } trait MsvcBasicName { @@ -742,7 +738,7 @@ fn build_cpp_f16_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> DINodeCreation cx, float_di_node, "bits", - cx.size_and_align_of(bits_ty), + cx.layout_of(bits_ty), Size::ZERO, DIFlags::FlagZero, type_di_node(cx, bits_ty), @@ -788,15 +784,7 @@ fn build_basic_type_di_node<'ll, 'tcx>( _ => bug!("debuginfo::build_basic_type_di_node - `t` is invalid type"), }; - let ty_di_node = unsafe { - llvm::LLVMRustDIBuilderCreateBasicType( - DIB(cx), - name.as_c_char_ptr(), - name.len(), - cx.size_of(t).bits(), - encoding, - ) - }; + let ty_di_node = create_basic_type(cx, name, cx.size_of(t), encoding); if !cpp_like_debuginfo { return DINodeCreationResult::new(ty_di_node, false); @@ -824,6 +812,23 @@ fn build_basic_type_di_node<'ll, 'tcx>( DINodeCreationResult::new(typedef_di_node, false) } +fn create_basic_type<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + name: &str, + size: Size, + encoding: u32, +) -> &'ll DIBasicType { + unsafe { + llvm::LLVMRustDIBuilderCreateBasicType( + DIB(cx), + name.as_c_char_ptr(), + name.len(), + size.bits(), + encoding, + ) + } +} + fn build_foreign_type_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>, @@ -929,17 +934,13 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>( }; unsafe { - let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile( + let compile_unit_file = create_file( debug_context.builder.as_ref(), - name_in_debuginfo.as_c_char_ptr(), - name_in_debuginfo.len(), - work_dir.as_c_char_ptr(), - work_dir.len(), + &name_in_debuginfo, + &work_dir, + "", llvm::ChecksumKind::None, - ptr::null(), - 0, - ptr::null(), - 0, + None, ); let unit_metadata = llvm::LLVMRustDIBuilderCreateCompileUnit( @@ -971,7 +972,7 @@ fn build_field_di_node<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, owner: &'ll DIScope, name: &str, - size_and_align: (Size, Align), + layout: TyAndLayout<'tcx>, offset: 
Size, flags: DIFlags, type_di_node: &'ll DIType, @@ -983,6 +984,30 @@ fn build_field_di_node<'ll, 'tcx>( } else { (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER) }; + create_member_type( + cx, + owner, + name, + file_metadata, + line_number, + layout, + offset, + flags, + type_di_node, + ) +} + +fn create_member_type<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + owner: &'ll DIScope, + name: &str, + file_metadata: &'ll DIType, + line_number: u32, + layout: TyAndLayout<'tcx>, + offset: Size, + flags: DIFlags, + type_di_node: &'ll DIType, +) -> &'ll DIType { unsafe { llvm::LLVMRustDIBuilderCreateMemberType( DIB(cx), @@ -991,8 +1016,8 @@ fn build_field_di_node<'ll, 'tcx>( name.len(), file_metadata, line_number, - size_and_align.0.bits(), - size_and_align.1.bits() as u32, + layout.size.bits(), + layout.align.abi.bits() as u32, offset.bits(), flags, type_di_node, @@ -1076,7 +1101,7 @@ fn build_struct_type_di_node<'ll, 'tcx>( cx, owner, &field_name[..], - (field_layout.size, field_layout.align.abi), + field_layout, struct_type_and_layout.fields.offset(i), visibility_di_flags(cx, f.did, adt_def.did()), type_di_node(cx, field_layout.ty), @@ -1126,7 +1151,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>( cx, closure_or_coroutine_di_node, capture_name.as_str(), - cx.size_and_align_of(up_var_ty), + cx.layout_of(up_var_ty), layout.fields.offset(index), DIFlags::FlagZero, type_di_node(cx, up_var_ty), @@ -1171,7 +1196,7 @@ fn build_tuple_type_di_node<'ll, 'tcx>( cx, tuple_di_node, &tuple_field_name(index), - cx.size_and_align_of(component_type), + cx.layout_of(component_type), tuple_type_and_layout.fields.offset(index), DIFlags::FlagZero, type_di_node(cx, component_type), @@ -1269,7 +1294,7 @@ fn build_union_type_di_node<'ll, 'tcx>( cx, owner, f.name.as_str(), - size_and_align_of(field_layout), + field_layout, Size::ZERO, DIFlags::FlagZero, type_di_node(cx, field_layout.ty), @@ -1287,32 +1312,33 @@ fn build_union_type_di_node<'ll, 'tcx>( fn build_generic_type_param_di_nodes<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, ty: Ty<'tcx>, -) -> SmallVec<&'ll DIType> { +) -> SmallVec<Option<&'ll DIType>> { if let ty::Adt(def, args) = *ty.kind() { - if args.types().next().is_some() { - let generics = cx.tcx.generics_of(def.did()); - let names = get_parameter_names(cx, generics); - let template_params: SmallVec<_> = iter::zip(args, names) - .filter_map(|(kind, name)| { - kind.as_type().map(|ty| { - let actual_type = cx.tcx.normalize_erasing_regions(cx.typing_env(), ty); - let actual_type_di_node = type_di_node(cx, actual_type); - let name = name.as_str(); - unsafe { - llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( - DIB(cx), - None, - name.as_c_char_ptr(), - name.len(), - actual_type_di_node, - ) - } - }) + let generics = cx.tcx.generics_of(def.did()); + return get_template_parameters(cx, generics, args); + } + + return smallvec![]; +} + +pub(super) fn get_template_parameters<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx>, + generics: &ty::Generics, + args: ty::GenericArgsRef<'tcx>, +) -> SmallVec<Option<&'ll DIType>> { + if args.types().next().is_some() { + let names = get_parameter_names(cx, generics); + let template_params: SmallVec<_> = iter::zip(args, names) + .filter_map(|(kind, name)| { + kind.as_type().map(|ty| { + let actual_type = cx.tcx.normalize_erasing_regions(cx.typing_env(), ty); + let actual_type_di_node = type_di_node(cx, actual_type); + Some(cx.create_template_type_parameter(name.as_str(), actual_type_di_node)) }) - .collect(); + }) + .collect(); - return template_params; - } + return template_params; } 
return smallvec![]; @@ -1416,7 +1442,9 @@ fn build_vtable_type_di_node<'ll, 'tcx>( let void_pointer_ty = Ty::new_imm_ptr(tcx, tcx.types.unit); let void_pointer_type_di_node = type_di_node(cx, void_pointer_ty); let usize_di_node = type_di_node(cx, tcx.types.usize); - let (pointer_size, pointer_align) = cx.size_and_align_of(void_pointer_ty); + let pointer_layout = cx.layout_of(void_pointer_ty); + let pointer_size = pointer_layout.size; + let pointer_align = pointer_layout.align.abi; // If `usize` is not pointer-sized and -aligned then the size and alignment computations // for the vtable as a whole would be wrong. Let's make sure this holds even on weird // platforms. @@ -1472,7 +1500,7 @@ fn build_vtable_type_di_node<'ll, 'tcx>( cx, vtable_type_di_node, &field_name, - (pointer_size, pointer_align), + pointer_layout, field_offset, DIFlags::FlagZero, field_type_di_node, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs index a72e205c9b2..07075be55fa 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs @@ -17,8 +17,8 @@ use crate::debuginfo::metadata::enums::DiscrResult; use crate::debuginfo::metadata::type_map::{self, Stub, UniqueTypeId}; use crate::debuginfo::metadata::{ DINodeCreationResult, NO_GENERICS, NO_SCOPE_METADATA, SmallVec, UNKNOWN_LINE_NUMBER, - build_field_di_node, file_metadata, file_metadata_from_def_id, size_and_align_of, type_di_node, - unknown_file_metadata, visibility_di_flags, + build_field_di_node, create_member_type, file_metadata, file_metadata_from_def_id, + size_and_align_of, type_di_node, unknown_file_metadata, visibility_di_flags, }; use crate::debuginfo::utils::DIB; use crate::llvm::debuginfo::{DIFile, DIFlags, DIType}; @@ -370,9 +370,9 @@ fn build_single_variant_union_fields<'ll, 'tcx>( cx, enum_type_di_node, &variant_union_field_name(variant_index), - // NOTE: We use the size and align of the entire type, not from variant_layout + // NOTE: We use the layout of the entire type, not from variant_layout // since the later is sometimes smaller (if it has fewer fields). - size_and_align_of(enum_type_and_layout), + enum_type_and_layout, Size::ZERO, visibility_flags, variant_struct_type_wrapper_di_node, @@ -560,7 +560,7 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>( cx, wrapper_struct_type_di_node, "value", - size_and_align_of(enum_or_coroutine_type_and_layout), + enum_or_coroutine_type_and_layout, Size::ZERO, DIFlags::FlagZero, variant_struct_type_di_node, @@ -820,7 +820,6 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( .unwrap_or_else(|| (unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER)); let field_name = variant_union_field_name(variant_member_info.variant_index); - let (size, align) = size_and_align_of(enum_type_and_layout); let variant_struct_type_wrapper = build_variant_struct_wrapper_type_di_node( cx, @@ -840,27 +839,23 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( }, ); - // We use LLVMRustDIBuilderCreateMemberType() member type directly because + // We use create_member_type() member type directly because // the build_field_di_node() function does not support specifying a source location, // which is something that we don't do anywhere else. 
- unsafe { - llvm::LLVMRustDIBuilderCreateMemberType( - DIB(cx), - enum_type_di_node, - field_name.as_c_char_ptr(), - field_name.len(), - file_di_node, - line_number, - // NOTE: We use the size and align of the entire type, not from variant_layout - // since the later is sometimes smaller (if it has fewer fields). - size.bits(), - align.bits() as u32, - // Union fields are always at offset zero - Size::ZERO.bits(), - di_flags, - variant_struct_type_wrapper, - ) - } + create_member_type( + cx, + enum_type_di_node, + &field_name, + file_di_node, + line_number, + // NOTE: We use the layout of the entire type, not from variant_layout + // since the later is sometimes smaller (if it has fewer fields). + enum_type_and_layout, + // Union fields are always at offset zero + Size::ZERO, + di_flags, + variant_struct_type_wrapper, + ) })); assert_eq!( @@ -874,7 +869,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( if is_128_bits { let type_di_node = type_di_node(cx, cx.tcx.types.u64); - let size_and_align = cx.size_and_align_of(cx.tcx.types.u64); + let u64_layout = cx.layout_of(cx.tcx.types.u64); let (lo_offset, hi_offset) = match cx.tcx.data_layout.endian { Endian::Little => (0, 8), @@ -889,7 +884,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( cx, enum_type_di_node, TAG_FIELD_NAME_128_LO, - size_and_align, + u64_layout, lo_offset, di_flags, type_di_node, @@ -900,7 +895,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( cx, enum_type_di_node, TAG_FIELD_NAME_128_HI, - size_and_align, + u64_layout, hi_offset, DIFlags::FlagZero, type_di_node, @@ -911,7 +906,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( cx, enum_type_di_node, TAG_FIELD_NAME, - cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty), + enum_type_and_layout.field(cx, tag_field), enum_type_and_layout.fields.offset(tag_field), di_flags, tag_base_type_di_node, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs index 9f6a5cc89e0..6792c307fdc 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs @@ -249,7 +249,7 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>( cx, struct_type_di_node, &field_name, - (field_layout.size, field_layout.align.abi), + field_layout, variant_layout.fields.offset(field_index), di_flags, type_di_node(cx, field_layout.ty), @@ -332,7 +332,7 @@ fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>( cx, variant_struct_type_di_node, &field_name, - cx.size_and_align_of(field_type), + cx.layout_of(field_type), variant_layout.fields.offset(field_index), DIFlags::FlagZero, type_di_node(cx, field_type), @@ -352,7 +352,7 @@ fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>( cx, variant_struct_type_di_node, upvar_name.as_str(), - cx.size_and_align_of(upvar_ty), + cx.layout_of(upvar_ty), coroutine_type_and_layout.fields.offset(index), DIFlags::FlagZero, type_di_node(cx, upvar_ty), @@ -363,6 +363,7 @@ fn build_coroutine_variant_struct_type_di_node<'ll, 'tcx>( state_specific_fields.into_iter().chain(common_fields).collect() }, + // FIXME: this is a no-op. `build_generic_type_param_di_nodes` only works for Adts. 
|cx| build_generic_type_param_di_nodes(cx, coroutine_type_and_layout.ty), ) .di_node diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index 187d97c54c8..bfd131cfd3d 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -13,9 +13,9 @@ use smallvec::smallvec; use crate::common::{AsCCharPtr, CodegenCx}; use crate::debuginfo::metadata::type_map::{self, Stub, StubInfo, UniqueTypeId}; use crate::debuginfo::metadata::{ - DINodeCreationResult, NO_GENERICS, SmallVec, UNKNOWN_LINE_NUMBER, file_metadata, - file_metadata_from_def_id, size_and_align_of, type_di_node, unknown_file_metadata, - visibility_di_flags, + DINodeCreationResult, NO_GENERICS, SmallVec, UNKNOWN_LINE_NUMBER, create_member_type, + file_metadata, file_metadata_from_def_id, size_and_align_of, type_di_node, + unknown_file_metadata, visibility_di_flags, }; use crate::debuginfo::utils::{DIB, create_DIArray, get_namespace_for_item}; use crate::llvm::debuginfo::{DIFile, DIFlags, DIType}; @@ -363,23 +363,22 @@ fn build_discr_member_di_node<'ll, 'tcx>( &Variants::Multiple { tag_field, .. } => { let tag_base_type = tag_base_type(cx.tcx, enum_or_coroutine_type_and_layout); - let (size, align) = cx.size_and_align_of(tag_base_type); - - unsafe { - Some(llvm::LLVMRustDIBuilderCreateMemberType( - DIB(cx), - containing_scope, - tag_name.as_c_char_ptr(), - tag_name.len(), - unknown_file_metadata(cx), - UNKNOWN_LINE_NUMBER, - size.bits(), - align.bits() as u32, - enum_or_coroutine_type_and_layout.fields.offset(tag_field).bits(), - DIFlags::FlagArtificial, - type_di_node(cx, tag_base_type), - )) - } + let ty = type_di_node(cx, tag_base_type); + let file = unknown_file_metadata(cx); + + let layout = cx.layout_of(tag_base_type); + + Some(create_member_type( + cx, + containing_scope, + &tag_name, + file, + UNKNOWN_LINE_NUMBER, + layout, + enum_or_coroutine_type_and_layout.fields.offset(tag_field), + DIFlags::FlagArtificial, + ty, + )) } } } diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs index af1d503ad6a..ae2ab32ef53 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs @@ -257,7 +257,7 @@ pub(super) fn build_type_with_children<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, stub_info: StubInfo<'ll, 'tcx>, members: impl FnOnce(&CodegenCx<'ll, 'tcx>, &'ll DIType) -> SmallVec<&'ll DIType>, - generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<&'ll DIType>, + generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<Option<&'ll DIType>>, ) -> DINodeCreationResult<'ll> { assert_eq!(debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id), None); @@ -265,8 +265,7 @@ pub(super) fn build_type_with_children<'ll, 'tcx>( let members: SmallVec<_> = members(cx, stub_info.metadata).into_iter().map(|node| Some(node)).collect(); - let generics: SmallVec<Option<&'ll DIType>> = - generics(cx).into_iter().map(|node| Some(node)).collect(); + let generics = generics(cx); if !(members.is_empty() && generics.is_empty()) { unsafe { diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 10819a53b1d..ae7d080db66 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ 
b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -2,10 +2,11 @@ use std::cell::{OnceCell, RefCell}; use std::ops::Range; +use std::ptr; use std::sync::Arc; -use std::{iter, ptr}; use libc::c_uint; +use metadata::create_subroutine_type; use rustc_abi::Size; use rustc_codegen_ssa::debuginfo::type_names; use rustc_codegen_ssa::mir::debuginfo::VariableKind::*; @@ -34,8 +35,8 @@ use crate::builder::Builder; use crate::common::{AsCCharPtr, CodegenCx}; use crate::llvm; use crate::llvm::debuginfo::{ - DIArray, DIBuilderBox, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType, - DIVariable, + DIArray, DIBuilderBox, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, + DITemplateTypeParameter, DIType, DIVariable, }; use crate::value::Value; @@ -251,7 +252,7 @@ struct DebugLoc { col: u32, } -impl CodegenCx<'_, '_> { +impl<'ll> CodegenCx<'ll, '_> { /// Looks up debug source information about a `BytePos`. // FIXME(eddyb) rename this to better indicate it's a duplicate of // `lookup_char_pos` rather than `dbg_loc`, perhaps by making @@ -279,6 +280,22 @@ impl CodegenCx<'_, '_> { DebugLoc { file, line, col } } } + + fn create_template_type_parameter( + &self, + name: &str, + actual_type_metadata: &'ll DIType, + ) -> &'ll DITemplateTypeParameter { + unsafe { + llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(self), + None, + name.as_c_char_ptr(), + name.len(), + actual_type_metadata, + ) + } + } } impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -325,10 +342,8 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { let loc = self.lookup_debug_loc(span.lo()); let file_metadata = file_metadata(self, &loc.file); - let function_type_metadata = unsafe { - let fn_signature = get_function_signature(self, fn_abi); - llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature) - }; + let function_type_metadata = + create_subroutine_type(self, get_function_signature(self, fn_abi)); let mut name = String::with_capacity(64); type_names::push_item_name(tcx, def_id, false, &mut name); @@ -471,46 +486,10 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { generics: &ty::Generics, args: GenericArgsRef<'tcx>, ) -> &'ll DIArray { - if args.types().next().is_none() { - return create_DIArray(DIB(cx), &[]); - } - - // Again, only create type information if full debuginfo is enabled - let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { - let names = get_parameter_names(cx, generics); - iter::zip(args, names) - .filter_map(|(kind, name)| { - kind.as_type().map(|ty| { - let actual_type = cx.tcx.normalize_erasing_regions(cx.typing_env(), ty); - let actual_type_metadata = type_di_node(cx, actual_type); - let name = name.as_str(); - unsafe { - Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( - DIB(cx), - None, - name.as_c_char_ptr(), - name.len(), - actual_type_metadata, - )) - } - }) - }) - .collect() - } else { - vec![] - }; - + let template_params = metadata::get_template_parameters(cx, generics, args); create_DIArray(DIB(cx), &template_params) } - fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> { - let mut names = generics.parent.map_or_else(Vec::new, |def_id| { - get_parameter_names(cx, cx.tcx.generics_of(def_id)) - }); - names.extend(generics.own_params.iter().map(|param| param.name)); - names - } - /// Returns a scope, plus `true` if that's a type scope for "class" methods, /// otherwise `false` for plain namespace scopes. 
fn get_containing_scope<'ll, 'tcx>( diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index e79662ebc64..2419ec1f888 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -11,6 +11,8 @@ //! * Use define_* family of methods when you might be defining the Value. //! * When in doubt, define. +use std::borrow::Borrow; + use itertools::Itertools; use rustc_codegen_ssa::traits::TypeMembershipCodegenMethods; use rustc_data_structures::fx::FxIndexSet; @@ -22,7 +24,7 @@ use tracing::debug; use crate::abi::FnAbiLlvmExt; use crate::common::AsCCharPtr; -use crate::context::{CodegenCx, SimpleCx}; +use crate::context::{CodegenCx, GenericCx, SCx, SimpleCx}; use crate::llvm::AttributePlace::Function; use crate::llvm::Visibility; use crate::type_::Type; @@ -81,16 +83,25 @@ pub(crate) fn declare_raw_fn<'ll, 'tcx>( llfn } -impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { /// Declare a global value. /// /// If there’s a value with the same name already declared, the function will /// return its Value instead. pub(crate) fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value { debug!("declare_global(name={:?})", name); - unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_c_char_ptr(), name.len(), ty) } + unsafe { + llvm::LLVMRustGetOrInsertGlobal( + (**self).borrow().llmod, + name.as_c_char_ptr(), + name.len(), + ty, + ) + } } +} +impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// Declare a C ABI function. /// /// Only use this for foreign function ABIs and glue. For Rust functions use diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 1169b9c067d..67135fcc308 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -14,6 +14,7 @@ use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, LayoutOf}; use rustc_middle::ty::{self, GenericArgsRef, Ty}; use rustc_middle::{bug, span_bug}; use rustc_span::{Span, Symbol, sym}; +use rustc_symbol_mangling::mangle_internal_symbol; use rustc_target::callconv::{FnAbi, PassMode}; use rustc_target::spec::{HasTargetSpec, PanicStrategy}; use tracing::debug; @@ -470,7 +471,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { let layout = self.layout_of(tp_ty).layout; let use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, - Vector { .. } => false, + SimdVector { .. } => false, Memory { .. } => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), @@ -649,7 +650,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value { // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time // optimization pass replaces calls to this intrinsic with code to test type membership. 
- let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) }; + let typeid = self.get_metadata_value(typeid); self.call_intrinsic("llvm.type.test", &[pointer, typeid]) } @@ -659,7 +660,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { vtable_byte_offset: u64, typeid: &'ll Metadata, ) -> Self::Value { - let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) }; + let typeid = self.get_metadata_value(typeid); let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32); let type_checked_load = self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]); @@ -812,7 +813,10 @@ fn codegen_msvc_try<'ll>( let type_name = bx.const_bytes(b"rust_panic\0"); let type_info = bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false); - let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info)); + let tydesc = bx.declare_global( + &mangle_internal_symbol(bx.tcx, "__rust_panic_type_info"), + bx.val_ty(type_info), + ); llvm::set_linkage(tydesc, llvm::Linkage::LinkOnceODRLinkage); if bx.cx.tcx.sess.target.supports_comdat() { diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index e9e1b644f18..f622646a5d9 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -12,7 +12,6 @@ #![feature(exact_size_is_empty)] #![feature(extern_types)] #![feature(file_buffered)] -#![feature(hash_raw_entry)] #![feature(if_let_guard)] #![feature(impl_trait_in_assoc_type)] #![feature(iter_intersperse)] @@ -20,7 +19,6 @@ #![feature(rustdoc_internals)] #![feature(slice_as_array)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::any::Any; @@ -29,6 +27,7 @@ use std::mem::ManuallyDrop; use back::owned_target_machine::OwnedTargetMachine; use back::write::{create_informational_target_machine, create_target_machine}; +use context::SimpleCx; use errors::{AutoDiffWithoutLTO, ParseTargetMachineConfig}; pub(crate) use llvm_util::target_features_cfg; use rustc_ast::expand::allocator::AllocatorKind; @@ -116,9 +115,11 @@ impl ExtraBackendMethods for LlvmCodegenBackend { kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind, ) -> ModuleLlvm { - let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name); + let module_llvm = ModuleLlvm::new_metadata(tcx, module_name); + let cx = + SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size); unsafe { - allocator::codegen(tcx, &mut module_llvm, module_name, kind, alloc_error_handler_kind); + allocator::codegen(tcx, cx, module_name, kind, alloc_error_handler_kind); } module_llvm } @@ -194,7 +195,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { unsafe fn optimize( cgcx: &CodegenContext<Self>, dcx: DiagCtxtHandle<'_>, - module: &ModuleCodegen<Self::Module>, + module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, ) -> Result<(), FatalError> { unsafe { back::write::optimize(cgcx, dcx, module, config) } @@ -339,8 +340,8 @@ impl CodegenBackend for LlvmCodegenBackend { llvm_util::print_version(); } - fn target_features_cfg(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> { - target_features_cfg(sess, allow_unstable) + fn target_features_cfg(&self, sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) { + target_features_cfg(sess) } fn codegen_crate<'tcx>( diff --git a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs index 
25ca3498803..f6b23862907 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs @@ -3,13 +3,14 @@ use libc::{c_char, c_uint}; +use super::MetadataKindId; use super::ffi::{BasicBlock, Metadata, Module, Type, Value}; use crate::llvm::Bool; #[link(name = "llvm-wrapper", kind = "static")] unsafe extern "C" { // Enzyme - pub(crate) fn LLVMRustHasMetadata(I: &Value, KindID: c_uint) -> bool; + pub(crate) safe fn LLVMRustHasMetadata(I: &Value, KindID: MetadataKindId) -> bool; pub(crate) fn LLVMRustEraseInstUntilInclusive(BB: &BasicBlock, I: &Value); pub(crate) fn LLVMRustGetLastInstruction<'a>(BB: &BasicBlock) -> Option<&'a Value>; pub(crate) fn LLVMRustDIGetInstMetadata(I: &Value) -> Option<&Metadata>; diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index da91e6edbcf..83efb3ea660 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -976,6 +976,16 @@ pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void); pub type GetSymbolsCallback = unsafe extern "C" fn(*mut c_void, *const c_char) -> *mut c_void; pub type GetSymbolsErrorCallback = unsafe extern "C" fn(*const c_char) -> *mut c_void; +#[derive(Copy, Clone)] +#[repr(transparent)] +pub struct MetadataKindId(c_uint); + +impl From<MetadataType> for MetadataKindId { + fn from(value: MetadataType) -> Self { + Self(value as c_uint) + } +} + unsafe extern "C" { // Create and destroy contexts. pub(crate) fn LLVMContextDispose(C: &'static mut Context); @@ -983,7 +993,7 @@ unsafe extern "C" { C: &Context, Name: *const c_char, SLen: c_uint, - ) -> c_uint; + ) -> MetadataKindId; // Create modules. pub(crate) fn LLVMModuleCreateWithNameInContext( @@ -1046,14 +1056,13 @@ unsafe extern "C" { pub(crate) fn LLVMMetadataTypeInContext(C: &Context) -> &Type; // Operations on all values - pub(crate) fn LLVMIsUndef(Val: &Value) -> Bool; pub(crate) fn LLVMTypeOf(Val: &Value) -> &Type; pub(crate) fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char; pub(crate) fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t); pub(crate) fn LLVMReplaceAllUsesWith<'a>(OldVal: &'a Value, NewVal: &'a Value); - pub(crate) fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Node: &'a Value); + pub(crate) safe fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: MetadataKindId, Node: &'a Value); pub(crate) fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata); - pub(crate) fn LLVMValueAsMetadata(Node: &Value) -> &Metadata; + pub(crate) safe fn LLVMValueAsMetadata(Node: &Value) -> &Metadata; // Operations on constants of any type pub(crate) fn LLVMConstNull(Ty: &Type) -> &Value; @@ -1147,7 +1156,7 @@ unsafe extern "C" { pub(crate) fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode); pub(crate) fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; pub(crate) fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); - pub(crate) fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); + pub(crate) safe fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); // Operations on attributes pub(crate) fn LLVMCreateStringAttribute( @@ -1204,7 +1213,7 @@ unsafe extern "C" { pub(crate) fn LLVMGetCurrentDebugLocation2<'a>(Builder: &Builder<'a>) -> Option<&'a Metadata>; // Terminators - pub(crate) fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value; + pub(crate) safe fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value; 
pub(crate) fn LLVMBuildRet<'a>(B: &Builder<'a>, V: &'a Value) -> &'a Value; pub(crate) fn LLVMBuildBr<'a>(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value; pub(crate) fn LLVMBuildCondBr<'a>( @@ -1680,7 +1689,7 @@ unsafe extern "C" { Packed: Bool, ); - pub(crate) fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; + pub(crate) safe fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; pub(crate) fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); @@ -2010,6 +2019,8 @@ unsafe extern "C" { NumExpressions: size_t, CodeRegions: *const crate::coverageinfo::ffi::CodeRegion, NumCodeRegions: size_t, + ExpansionRegions: *const crate::coverageinfo::ffi::ExpansionRegion, + NumExpansionRegions: size_t, BranchRegions: *const crate::coverageinfo::ffi::BranchRegion, NumBranchRegions: size_t, MCDCBranchRegions: *const crate::coverageinfo::ffi::MCDCBranchRegion, @@ -2425,7 +2436,9 @@ unsafe extern "C" { NoPrepopulatePasses: bool, VerifyIR: bool, LintIR: bool, - UseThinLTOBuffers: bool, + ThinLTOBuffer: Option<&mut *mut ThinLTOBuffer>, + EmitThinLTO: bool, + EmitThinLTOSummary: bool, MergeFunctions: bool, UnrollLoops: bool, SLPVectorize: bool, diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 5cc4f4ab9e6..4a166b0872d 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -306,45 +306,44 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea /// Must express features in the way Rust understands them. /// /// We do not have to worry about RUSTC_SPECIFIC_FEATURES here, those are handled outside codegen. -pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<Symbol> { - let mut features: FxHashSet<Symbol> = Default::default(); - +pub(crate) fn target_features_cfg(sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) { // Add base features for the target. // We do *not* add the -Ctarget-features there, and instead duplicate the logic for that below. // The reason is that if LLVM considers a feature implied but we do not, we don't want that to // show up in `cfg`. That way, `cfg` is entirely under our control -- except for the handling of - // the target CPU, that is still expanded to target features (with all their implied features) by - // LLVM. + // the target CPU, that is still expanded to target features (with all their implied features) + // by LLVM. let target_machine = create_informational_target_machine(sess, true); - // Compute which of the known target features are enabled in the 'base' target machine. - // We only consider "supported" features; "forbidden" features are not reflected in `cfg` as of now. - features.extend( - sess.target - .rust_target_features() - .iter() - .filter(|(feature, _, _)| { - // skip checking special features, as LLVM may not understand them - if RUSTC_SPECIAL_FEATURES.contains(feature) { - return true; - } - // check that all features in a given smallvec are enabled - if let Some(feat) = to_llvm_features(sess, feature) { - for llvm_feature in feat { - let cstr = SmallCStr::new(llvm_feature); - if !unsafe { llvm::LLVMRustHasFeature(target_machine.raw(), cstr.as_ptr()) } - { - return false; - } + // Compute which of the known target features are enabled in the 'base' target machine. We only + // consider "supported" features; "forbidden" features are not reflected in `cfg` as of now. 
+ let mut features: FxHashSet<Symbol> = sess + .target + .rust_target_features() + .iter() + .filter(|(feature, _, _)| { + // skip checking special features, as LLVM may not understand them + if RUSTC_SPECIAL_FEATURES.contains(feature) { + return true; + } + if let Some(feat) = to_llvm_features(sess, feature) { + for llvm_feature in feat { + let cstr = SmallCStr::new(llvm_feature); + // `LLVMRustHasFeature` is moderately expensive. On targets with many + // features (e.g. x86) these calls take a non-trivial fraction of runtime + // when compiling very small programs. + if !unsafe { llvm::LLVMRustHasFeature(target_machine.raw(), cstr.as_ptr()) } { + return false; } - true - } else { - false } - }) - .map(|(feature, _, _)| Symbol::intern(feature)), - ); + true + } else { + false + } + }) + .map(|(feature, _, _)| Symbol::intern(feature)) + .collect(); - // Add enabled features + // Add enabled and remove disabled features. for (enabled, feature) in sess.opts.cg.target_feature.split(',').filter_map(|s| match s.chars().next() { Some('+') => Some((true, Symbol::intern(&s[1..]))), @@ -360,7 +359,7 @@ pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<S #[allow(rustc::potential_query_instability)] features.extend( sess.target - .implied_target_features(std::iter::once(feature.as_str())) + .implied_target_features(feature.as_str()) .iter() .map(|s| Symbol::intern(s)), ); @@ -371,11 +370,7 @@ pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<S // `features.contains` below. #[allow(rustc::potential_query_instability)] features.retain(|f| { - if sess - .target - .implied_target_features(std::iter::once(f.as_str())) - .contains(&feature.as_str()) - { + if sess.target.implied_target_features(f.as_str()).contains(&feature.as_str()) { // If `f` if implies `feature`, then `!feature` implies `!f`, so we have to // remove `f`. (This is the standard logical contraposition principle.) false @@ -387,25 +382,31 @@ pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<S } } - // Filter enabled features based on feature gates - sess.target - .rust_target_features() - .iter() - .filter_map(|(feature, gate, _)| { - // The `allow_unstable` set is used by rustc internally to determined which target - // features are truly available, so we want to return even perma-unstable "forbidden" - // features. - if allow_unstable - || (gate.in_cfg() && (sess.is_nightly_build() || gate.requires_nightly().is_none())) - { - Some(*feature) - } else { - None - } - }) - .filter(|feature| features.contains(&Symbol::intern(feature))) - .map(|feature| Symbol::intern(feature)) - .collect() + // Filter enabled features based on feature gates. + let f = |allow_unstable| { + sess.target + .rust_target_features() + .iter() + .filter_map(|(feature, gate, _)| { + // The `allow_unstable` set is used by rustc internally to determined which target + // features are truly available, so we want to return even perma-unstable + // "forbidden" features. 
+ if allow_unstable + || (gate.in_cfg() + && (sess.is_nightly_build() || gate.requires_nightly().is_none())) + { + Some(Symbol::intern(feature)) + } else { + None + } + }) + .filter(|feature| features.contains(&feature)) + .collect() + }; + + let target_features = f(false); + let unstable_target_features = f(true); + (target_features, unstable_target_features) } pub(crate) fn print_version() { @@ -682,7 +683,7 @@ pub(crate) fn global_llvm_features( for feature in sess.opts.cg.target_feature.split(',') { if let Some(feature) = feature.strip_prefix('+') { all_rust_features.extend( - UnordSet::from(sess.target.implied_target_features(std::iter::once(feature))) + UnordSet::from(sess.target.implied_target_features(feature)) .to_sorted_stable_ord() .iter() .map(|&&s| (true, s)), diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index d61ce417562..b89ce90d1a1 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -1,3 +1,4 @@ +use std::borrow::Borrow; use std::{fmt, ptr}; use libc::{c_char, c_uint}; @@ -11,7 +12,7 @@ use rustc_middle::ty::{self, Ty}; use rustc_target::callconv::{CastTarget, FnAbi}; use crate::abi::{FnAbiLlvmExt, LlvmType}; -use crate::context::{CodegenCx, SimpleCx}; +use crate::context::{CodegenCx, GenericCx, SCx}; pub(crate) use crate::llvm::Type; use crate::llvm::{Bool, False, Metadata, True}; use crate::type_of::LayoutLlvmExt; @@ -36,29 +37,29 @@ impl fmt::Debug for Type { } impl<'ll> CodegenCx<'ll, '_> {} -impl<'ll> SimpleCx<'ll> { +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { pub(crate) fn type_named_struct(&self, name: &str) -> &'ll Type { let name = SmallCStr::new(name); - unsafe { llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) } + unsafe { llvm::LLVMStructCreateNamed(self.llcx(), name.as_ptr()) } } pub(crate) fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) { unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) } } pub(crate) fn type_void(&self) -> &'ll Type { - unsafe { llvm::LLVMVoidTypeInContext(self.llcx) } + unsafe { llvm::LLVMVoidTypeInContext(self.llcx()) } } pub(crate) fn type_token(&self) -> &'ll Type { - unsafe { llvm::LLVMTokenTypeInContext(self.llcx) } + unsafe { llvm::LLVMTokenTypeInContext(self.llcx()) } } pub(crate) fn type_metadata(&self) -> &'ll Type { - unsafe { llvm::LLVMMetadataTypeInContext(self.llcx) } + unsafe { llvm::LLVMMetadataTypeInContext(self.llcx()) } } ///x Creates an integer type with the given number of bits, e.g., i24 pub(crate) fn type_ix(&self, num_bits: u64) -> &'ll Type { - unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) } + unsafe { llvm::LLVMIntTypeInContext(self.llcx(), num_bits as c_uint) } } pub(crate) fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { @@ -121,19 +122,28 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { self.type_array(self.type_from_integer(unit), size / unit_size) } } -impl<'ll> SimpleCx<'ll> { + +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { + pub(crate) fn llcx(&self) -> &'ll llvm::Context { + (**self).borrow().llcx + } + + pub(crate) fn isize_ty(&self) -> &'ll Type { + (**self).borrow().isize_ty + } + pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } pub(crate) fn type_i1(&self) -> &'ll Type { - unsafe { llvm::LLVMInt1TypeInContext(self.llcx) } + unsafe { 
llvm::LLVMInt1TypeInContext(self.llcx()) } } pub(crate) fn type_struct(&self, els: &[&'ll Type], packed: bool) -> &'ll Type { unsafe { llvm::LLVMStructTypeInContext( - self.llcx, + self.llcx(), els.as_ptr(), els.len() as c_uint, packed as Bool, @@ -142,45 +152,45 @@ impl<'ll> SimpleCx<'ll> { } } -impl<'ll, 'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, CX: Borrow<SCx<'ll>>> BaseTypeCodegenMethods for GenericCx<'ll, CX> { fn type_i8(&self) -> &'ll Type { - unsafe { llvm::LLVMInt8TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt8TypeInContext(self.llcx()) } } fn type_i16(&self) -> &'ll Type { - unsafe { llvm::LLVMInt16TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt16TypeInContext(self.llcx()) } } fn type_i32(&self) -> &'ll Type { - unsafe { llvm::LLVMInt32TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt32TypeInContext(self.llcx()) } } fn type_i64(&self) -> &'ll Type { - unsafe { llvm::LLVMInt64TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt64TypeInContext(self.llcx()) } } fn type_i128(&self) -> &'ll Type { - unsafe { llvm::LLVMIntTypeInContext(self.llcx, 128) } + unsafe { llvm::LLVMIntTypeInContext(self.llcx(), 128) } } fn type_isize(&self) -> &'ll Type { - self.isize_ty + self.isize_ty() } fn type_f16(&self) -> &'ll Type { - unsafe { llvm::LLVMHalfTypeInContext(self.llcx) } + unsafe { llvm::LLVMHalfTypeInContext(self.llcx()) } } fn type_f32(&self) -> &'ll Type { - unsafe { llvm::LLVMFloatTypeInContext(self.llcx) } + unsafe { llvm::LLVMFloatTypeInContext(self.llcx()) } } fn type_f64(&self) -> &'ll Type { - unsafe { llvm::LLVMDoubleTypeInContext(self.llcx) } + unsafe { llvm::LLVMDoubleTypeInContext(self.llcx()) } } fn type_f128(&self) -> &'ll Type { - unsafe { llvm::LLVMFP128TypeInContext(self.llcx) } + unsafe { llvm::LLVMFP128TypeInContext(self.llcx()) } } fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { @@ -196,7 +206,7 @@ impl<'ll, 'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type { - unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) } + unsafe { llvm::LLVMPointerTypeInContext(self.llcx(), address_space.0) } } fn element_type(&self, ty: &'ll Type) -> &'ll Type { diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index ba01fbff385..4e7096da502 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -19,7 +19,7 @@ fn uncached_llvm_type<'a, 'tcx>( ) -> &'a Type { match layout.backend_repr { BackendRepr::Scalar(_) => bug!("handled elsewhere"), - BackendRepr::Vector { element, count } => { + BackendRepr::SimdVector { element, count } => { let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } @@ -171,7 +171,7 @@ pub(crate) trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true, + BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false, } } @@ -179,9 +179,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_scalar_pair(&self) -> bool { match self.backend_repr { BackendRepr::ScalarPair(..) => true, - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => { - false - } + BackendRepr::Scalar(_) + | BackendRepr::SimdVector { .. 
} + | BackendRepr::Memory { .. } => false, } }
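Editorial note, not part of the commit above: a minimal self-contained sketch of the #[repr(transparent)] newtype pattern that the MetadataKindId change in llvm/ffi.rs relies on. The MetadataType variants and discriminant values below are invented for the example (rustc's real enum mirrors LLVM's metadata kind IDs); the point is that a transparent wrapper keeps the ABI of the underlying c_uint, so FFI declarations such as LLVMSetMetadata can take the typed ID without any change on the C++ side.

use std::os::raw::c_uint;

// Illustrative stand-in for rustc's MetadataType enum; the variant names and
// discriminants here are made up for the example.
#[allow(non_camel_case_types, dead_code)]
#[derive(Copy, Clone)]
#[repr(C)]
enum MetadataType {
    MD_dbg = 0,
    MD_tbaa = 1,
}

// repr(transparent) guarantees this wrapper has exactly the layout and ABI of
// the wrapped c_uint, so it can cross the C boundary wherever the raw kind ID
// was passed before.
#[derive(Copy, Clone)]
#[repr(transparent)]
struct MetadataKindId(c_uint);

impl From<MetadataType> for MetadataKindId {
    fn from(value: MetadataType) -> Self {
        Self(value as c_uint)
    }
}

fn main() {
    let id: MetadataKindId = MetadataType::MD_tbaa.into();
    assert_eq!(id.0, 1);
}

The same reasoning is what lets the safe fn declarations added in this diff drop the unsafe block at each call site: the soundness claim is made once, at the declaration, instead of at every caller.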

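A second editorial sketch, also not part of the commit: the control-flow shape target_features_cfg ends up with after the llvm_util.rs change, namely one closure parameterized over allow_unstable and called twice to produce the stable and unstable cfg feature lists from a single pass of feature probing. The feature table, the gate flag, and the enabled probe below are stand-ins invented for this example; in the real function the probe is LLVMRustHasFeature against the informational target machine and the gate check uses each feature's stability gate.

fn main() {
    // (feature name, allowed in stable cfg?) -- invented data for the sketch.
    let known: &[(&str, bool)] = &[("sse2", true), ("avx512fp16", false), ("neon", true)];
    // Stand-in for the LLVMRustHasFeature query against the base target machine.
    let enabled = |feature: &str| feature != "neon";

    // One closure, two calls: the expensive probing logic is written once.
    let f = |allow_unstable: bool| {
        known
            .iter()
            .filter(|&&(_, in_stable_cfg)| allow_unstable || in_stable_cfg)
            .map(|&(name, _)| name)
            .filter(|&name| enabled(name))
            .collect::<Vec<_>>()
    };

    let target_features = f(false);
    let unstable_target_features = f(true);
    assert_eq!(target_features, ["sse2"]);
    assert_eq!(unstable_target_features, ["sse2", "avx512fp16"]);
    println!("stable: {target_features:?}, unstable: {unstable_target_features:?}");
}

Returning both lists together mirrors the new (Vec<Symbol>, Vec<Symbol>) return type in the diff, so the caller no longer has to invoke the function twice with different allow_unstable arguments.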