From 4bd84b23a8537314132e98b9fb2c3fea2cb57496 Mon Sep 17 00:00:00 2001 From: Zalathar Date: Mon, 28 Oct 2024 18:52:39 +1100 Subject: Use a type-safe helper to cast `&str` and `&[u8]` to `*const c_char` --- compiler/rustc_codegen_llvm/src/allocator.rs | 9 ++-- compiler/rustc_codegen_llvm/src/asm.rs | 10 ++--- compiler/rustc_codegen_llvm/src/back/lto.rs | 3 +- compiler/rustc_codegen_llvm/src/back/write.rs | 11 ++--- compiler/rustc_codegen_llvm/src/common.rs | 18 ++++++++ compiler/rustc_codegen_llvm/src/consts.rs | 8 ++-- compiler/rustc_codegen_llvm/src/context.rs | 11 ++--- .../rustc_codegen_llvm/src/coverageinfo/mod.rs | 8 ++-- .../rustc_codegen_llvm/src/debuginfo/metadata.rs | 48 +++++++++++----------- .../src/debuginfo/metadata/enums/cpp_like.rs | 8 ++-- .../src/debuginfo/metadata/enums/mod.rs | 6 +-- .../src/debuginfo/metadata/enums/native.rs | 10 ++--- .../src/debuginfo/metadata/type_map.rs | 10 ++--- compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 14 +++---- .../rustc_codegen_llvm/src/debuginfo/namespace.rs | 4 +- compiler/rustc_codegen_llvm/src/declare.rs | 7 ++-- compiler/rustc_codegen_llvm/src/llvm/mod.rs | 12 +++--- 17 files changed, 110 insertions(+), 87 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 55ee6dcdf87..a2e6c7eb856 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -7,6 +7,7 @@ use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::{DebugInfo, OomStrategy}; +use crate::common::AsCCharPtr; use crate::llvm::{self, Context, False, Module, True, Type}; use crate::{ModuleLlvm, attributes, debuginfo}; @@ -76,14 +77,14 @@ pub(crate) unsafe fn codegen( unsafe { // __rust_alloc_error_handler_should_panic let name = OomStrategy::SYMBOL; - let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8); + let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8); llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility())); let val = tcx.sess.opts.unstable_opts.oom.should_panic(); let llval = llvm::LLVMConstInt(i8, val as u64, False); llvm::LLVMSetInitializer(ll_g, llval); let name = NO_ALLOC_SHIM_IS_UNSTABLE; - let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_ptr().cast(), name.len(), i8); + let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8); llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility())); let llval = llvm::LLVMConstInt(i8, 0, False); llvm::LLVMSetInitializer(ll_g, llval); @@ -115,7 +116,7 @@ fn create_wrapper_function( ); let llfn = llvm::LLVMRustGetOrInsertFunction( llmod, - from_name.as_ptr().cast(), + from_name.as_c_char_ptr(), from_name.len(), ty, ); @@ -137,7 +138,7 @@ fn create_wrapper_function( } let callee = - llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_ptr().cast(), to_name.len(), ty); + llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_c_char_ptr(), to_name.len(), ty); if let Some(no_return) = no_return { // -> ! 
DIFlagNoReturn attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]); diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index d1d7d0cf4ce..3c30822a2e2 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -15,7 +15,7 @@ use smallvec::SmallVec; use tracing::debug; use crate::builder::Builder; -use crate::common::Funclet; +use crate::common::{AsCCharPtr, Funclet}; use crate::context::CodegenCx; use crate::type_::Type; use crate::type_of::LayoutLlvmExt; @@ -420,7 +420,7 @@ impl<'tcx> AsmCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> { unsafe { llvm::LLVMAppendModuleInlineAsm( self.llmod, - template_str.as_ptr().cast(), + template_str.as_c_char_ptr(), template_str.len(), ); } @@ -458,14 +458,14 @@ pub(crate) fn inline_asm_call<'ll>( let fty = bx.cx.type_func(&argtys, output); unsafe { // Ask LLVM to verify that the constraints are well-formed. - let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len()); + let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()); debug!("constraint verification result: {:?}", constraints_ok); if constraints_ok { let v = llvm::LLVMRustInlineAsm( fty, - asm.as_ptr().cast(), + asm.as_c_char_ptr(), asm.len(), - cons.as_ptr().cast(), + cons.as_c_char_ptr(), cons.len(), volatile, alignstack, diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 1f7a923dd2c..79527d1beb3 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -25,6 +25,7 @@ use tracing::{debug, info}; use crate::back::write::{ self, CodegenDiagnosticsStage, DiagnosticHandlers, bitcode_section_name, save_temp_bitcode, }; +use crate::common::AsCCharPtr; use crate::errors::{ DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro, }; @@ -604,7 +605,7 @@ pub(crate) fn run_pass_manager( unsafe { if !llvm::LLVMRustHasModuleFlag( module.module_llvm.llmod(), - "LTOPostLink".as_ptr().cast(), + "LTOPostLink".as_c_char_ptr(), 11, ) { llvm::LLVMRustAddModuleFlagU32( diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index bf77fc56d08..e68ba96f14f 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -34,6 +34,7 @@ use crate::back::owned_target_machine::OwnedTargetMachine; use crate::back::profiling::{ LlvmSelfProfiler, selfprofile_after_pass_callback, selfprofile_before_pass_callback, }; +use crate::common::AsCCharPtr; use crate::errors::{ CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression, WithLlvmError, WriteBytecode, @@ -596,9 +597,9 @@ pub(crate) unsafe fn llvm_optimize( llvm_selfprofiler, selfprofile_before_pass_callback, selfprofile_after_pass_callback, - extra_passes.as_ptr().cast(), + extra_passes.as_c_char_ptr(), extra_passes.len(), - llvm_plugins.as_ptr().cast(), + llvm_plugins.as_c_char_ptr(), llvm_plugins.len(), ) }; @@ -1042,7 +1043,7 @@ unsafe fn embed_bitcode( llvm::LLVMSetInitializer(llglobal, llconst); let section = bitcode_section_name(cgcx); - llvm::LLVMSetSection(llglobal, section.as_ptr().cast()); + llvm::LLVMSetSection(llglobal, section.as_c_char_ptr()); llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); @@ -1066,9 +1067,9 @@ unsafe fn embed_bitcode( // We need custom 
section flags, so emit module-level inline assembly. let section_flags = if cgcx.is_pe_coff { "n" } else { "e" }; let asm = create_section_with_flags_asm(".llvmbc", section_flags, bitcode); - llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len()); + llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len()); let asm = create_section_with_flags_asm(".llvmcmd", section_flags, cmdline.as_bytes()); - llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_ptr().cast(), asm.len()); + llvm::LLVMAppendModuleInlineAsm(llmod, asm.as_c_char_ptr(), asm.len()); } } } diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index ff47eb944dd..29adc616ee2 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -392,3 +392,21 @@ pub(crate) fn get_dllimport<'tcx>( tcx.native_library(id) .and_then(|lib| lib.dll_imports.iter().find(|di| di.name.as_str() == name)) } + +/// Extension trait for explicit casts to `*const c_char`. +pub(crate) trait AsCCharPtr { + /// Equivalent to `self.as_ptr().cast()`, but only casts to `*const c_char`. + fn as_c_char_ptr(&self) -> *const c_char; +} + +impl AsCCharPtr for str { + fn as_c_char_ptr(&self) -> *const c_char { + self.as_ptr().cast() + } +} + +impl AsCCharPtr for [u8] { + fn as_c_char_ptr(&self) -> *const c_char { + self.as_ptr().cast() + } +} diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 1e3abcfd1af..21d996ef460 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -19,7 +19,7 @@ use rustc_target::abi::{ }; use tracing::{debug, instrument, trace}; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::errors::{ InvalidMinimumAlignmentNotPowerOfTwo, InvalidMinimumAlignmentTooLarge, SymbolAlreadyDefined, }; @@ -400,7 +400,7 @@ impl<'ll> CodegenCx<'ll, '_> { let new_g = llvm::LLVMRustGetOrInsertGlobal( self.llmod, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), val_llty, ); @@ -451,7 +451,7 @@ impl<'ll> CodegenCx<'ll, '_> { if let Some(section) = attrs.link_section { let section = llvm::LLVMMDStringInContext2( self.llcx, - section.as_str().as_ptr().cast(), + section.as_str().as_c_char_ptr(), section.as_str().len(), ); assert!(alloc.provenance().ptrs().is_empty()); @@ -462,7 +462,7 @@ impl<'ll> CodegenCx<'ll, '_> { let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len()); let alloc = - llvm::LLVMMDStringInContext2(self.llcx, bytes.as_ptr().cast(), bytes.len()); + llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len()); let data = [section, alloc]; let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()); let val = llvm::LLVMMetadataAsValue(self.llcx, meta); diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 067028a16ff..1a2197248b1 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -29,6 +29,7 @@ use smallvec::SmallVec; use crate::back::write::to_llvm_code_model; use crate::callee::get_fn; +use crate::common::AsCCharPtr; use crate::debuginfo::metadata::apply_vcall_visibility_metadata; use crate::llvm::{Metadata, MetadataType}; use crate::type_::Type; @@ -231,7 +232,7 @@ pub(crate) unsafe fn create_module<'ll>( // If we're normalizing integers with CFI, ensure LLVM generated functions do the same. 
// See https://github.com/llvm/llvm-project/pull/104826 if sess.is_sanitizer_cfi_normalize_integers_enabled() { - let cfi_normalize_integers = c"cfi-normalize-integers".as_ptr().cast(); + let cfi_normalize_integers = c"cfi-normalize-integers".as_ptr(); unsafe { llvm::LLVMRustAddModuleFlagU32( llmod, @@ -268,7 +269,7 @@ pub(crate) unsafe fn create_module<'ll>( let pfe = PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry); if pfe.prefix() > 0 { - let kcfi_offset = c"kcfi-offset".as_ptr().cast(); + let kcfi_offset = c"kcfi-offset".as_ptr(); unsafe { llvm::LLVMRustAddModuleFlagU32( llmod, @@ -429,7 +430,7 @@ pub(crate) unsafe fn create_module<'ll>( let name_metadata = unsafe { llvm::LLVMMDStringInContext2( llcx, - rustc_producer.as_ptr().cast(), + rustc_producer.as_c_char_ptr(), rustc_producer.as_bytes().len(), ) }; @@ -453,7 +454,7 @@ pub(crate) unsafe fn create_module<'ll>( llmod, llvm::LLVMModFlagBehavior::Error, c"target-abi".as_ptr(), - llvm_abiname.as_ptr().cast(), + llvm_abiname.as_c_char_ptr(), llvm_abiname.len(), ); } @@ -474,7 +475,7 @@ pub(crate) unsafe fn create_module<'ll>( // We already checked this during option parsing _ => unreachable!(), }; - unsafe { llvm::LLVMRustAddModuleFlagU32(llmod, behavior, key.as_ptr().cast(), *value) } + unsafe { llvm::LLVMRustAddModuleFlagU32(llmod, behavior, key.as_c_char_ptr(), *value) } } llmod diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs index c6b2a623ea6..a298ed86276 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs @@ -14,7 +14,7 @@ use rustc_target::abi::Size; use tracing::{debug, instrument}; use crate::builder::Builder; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::coverageinfo::map_data::FunctionCoverageCollector; use crate::llvm; @@ -236,7 +236,7 @@ fn create_pgo_func_name_var<'ll, 'tcx>( unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar( llfn, - mangled_fn_name.as_ptr().cast(), + mangled_fn_name.as_c_char_ptr(), mangled_fn_name.len(), ) } @@ -248,7 +248,7 @@ pub(crate) fn write_filenames_section_to_buffer<'a>( ) { let (pointers, lengths) = filenames .into_iter() - .map(|s: &str| (s.as_ptr().cast(), s.len())) + .map(|s: &str| (s.as_c_char_ptr(), s.len())) .unzip::<_, _, Vec<_>, Vec<_>>(); unsafe { @@ -291,7 +291,7 @@ pub(crate) fn write_mapping_to_buffer( } pub(crate) fn hash_bytes(bytes: &[u8]) -> u64 { - unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_ptr().cast(), bytes.len()) } + unsafe { llvm::LLVMRustCoverageHashByteArray(bytes.as_c_char_ptr(), bytes.len()) } } pub(crate) fn mapping_version() -> u32 { diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs index 15d441a986d..9064cfaeb29 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs @@ -32,7 +32,7 @@ use super::type_names::{compute_debuginfo_type_name, compute_debuginfo_vtable_na use super::utils::{ DIB, create_DIArray, debug_context, get_namespace_for_item, is_node_local_to_unit, }; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::debuginfo::metadata::type_map::build_type_with_children; use crate::debuginfo::utils::{WidePtrKind, wide_pointer_kind}; use crate::llvm::debuginfo::{ @@ -190,7 +190,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>( 
data_layout.pointer_size.bits(), data_layout.pointer_align.abi.bits() as u32, 0, // Ignore DWARF address space. - ptr_type_debuginfo_name.as_ptr().cast(), + ptr_type_debuginfo_name.as_c_char_ptr(), ptr_type_debuginfo_name.len(), ) }; @@ -348,7 +348,7 @@ fn build_subroutine_type_di_node<'ll, 'tcx>( size, align, 0, // Ignore DWARF address space. - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), ) }; @@ -518,7 +518,7 @@ fn recursion_marker_type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) -> &'ll D let name = ""; llvm::LLVMRustDIBuilderCreateBasicType( DIB(cx), - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), cx.tcx.data_layout.pointer_size.bits(), DW_ATE_unsigned, @@ -640,14 +640,14 @@ pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFi unsafe { llvm::LLVMRustDIBuilderCreateFile( DIB(cx), - file_name.as_ptr().cast(), + file_name.as_c_char_ptr(), file_name.len(), - directory.as_ptr().cast(), + directory.as_c_char_ptr(), directory.len(), hash_kind, - hash_value.as_ptr().cast(), + hash_value.as_c_char_ptr(), hash_value.len(), - source.map_or(ptr::null(), |x| x.as_ptr().cast()), + source.map_or(ptr::null(), |x| x.as_c_char_ptr()), source.map_or(0, |x| x.len()), ) } @@ -662,12 +662,12 @@ fn unknown_file_metadata<'ll>(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { llvm::LLVMRustDIBuilderCreateFile( DIB(cx), - file_name.as_ptr().cast(), + file_name.as_c_char_ptr(), file_name.len(), - directory.as_ptr().cast(), + directory.as_c_char_ptr(), directory.len(), llvm::ChecksumKind::None, - hash_value.as_ptr().cast(), + hash_value.as_c_char_ptr(), hash_value.len(), ptr::null(), 0, @@ -788,7 +788,7 @@ fn build_basic_type_di_node<'ll, 'tcx>( let ty_di_node = unsafe { llvm::LLVMRustDIBuilderCreateBasicType( DIB(cx), - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), cx.size_of(t).bits(), encoding, @@ -810,7 +810,7 @@ fn build_basic_type_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateTypedef( DIB(cx), ty_di_node, - typedef_name.as_ptr().cast(), + typedef_name.as_c_char_ptr(), typedef_name.len(), unknown_file_metadata(cx), 0, @@ -861,7 +861,7 @@ fn build_param_type_di_node<'ll, 'tcx>( di_node: unsafe { llvm::LLVMRustDIBuilderCreateBasicType( DIB(cx), - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), Size::ZERO.bits(), DW_ATE_unsigned, @@ -948,9 +948,9 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>( unsafe { let compile_unit_file = llvm::LLVMRustDIBuilderCreateFile( debug_context.builder, - name_in_debuginfo.as_ptr().cast(), + name_in_debuginfo.as_c_char_ptr(), name_in_debuginfo.len(), - work_dir.as_ptr().cast(), + work_dir.as_c_char_ptr(), work_dir.len(), llvm::ChecksumKind::None, ptr::null(), @@ -963,7 +963,7 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>( debug_context.builder, DW_LANG_RUST, compile_unit_file, - producer.as_ptr().cast(), + producer.as_c_char_ptr(), producer.len(), tcx.sess.opts.optimize != config::OptLevel::No, c"".as_ptr(), @@ -971,7 +971,7 @@ pub(crate) fn build_compile_unit_di_node<'ll, 'tcx>( // NB: this doesn't actually have any perceptible effect, it seems. LLVM will instead // put the path supplied to `MCSplitDwarfFile` into the debug info of the final // output(s). 
- split_name.as_ptr().cast(), + split_name.as_c_char_ptr(), split_name.len(), kind, 0, @@ -1022,7 +1022,7 @@ fn build_field_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateMemberType( DIB(cx), owner, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, @@ -1306,7 +1306,7 @@ fn build_generic_type_param_di_nodes<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( DIB(cx), None, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), actual_type_di_node, ) @@ -1382,9 +1382,9 @@ pub(crate) fn build_global_var_di_node<'ll>( llvm::LLVMRustDIBuilderCreateStaticVariable( DIB(cx), Some(var_scope), - var_name.as_ptr().cast(), + var_name.as_c_char_ptr(), var_name.len(), - linkage_name.as_ptr().cast(), + linkage_name.as_c_char_ptr(), linkage_name.len(), file_metadata, line_number, @@ -1602,9 +1602,9 @@ pub(crate) fn create_vtable_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateStaticVariable( DIB(cx), NO_SCOPE_METADATA, - vtable_name.as_ptr().cast(), + vtable_name.as_c_char_ptr(), vtable_name.len(), - linkage_name.as_ptr().cast(), + linkage_name.as_c_char_ptr(), linkage_name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs index 966788cf32f..5385d3a9212 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/cpp_like.rs @@ -11,7 +11,7 @@ use rustc_middle::ty::{self, AdtDef, CoroutineArgs, CoroutineArgsExt, Ty}; use rustc_target::abi::{Align, Endian, Size, TagEncoding, VariantIdx, Variants}; use smallvec::smallvec; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::debuginfo::metadata::enums::DiscrResult; use crate::debuginfo::metadata::type_map::{self, Stub, UniqueTypeId}; use crate::debuginfo::metadata::{ @@ -359,7 +359,7 @@ fn build_single_variant_union_fields<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateStaticMemberType( DIB(cx), enum_type_di_node, - TAG_FIELD_NAME.as_ptr().cast(), + TAG_FIELD_NAME.as_c_char_ptr(), TAG_FIELD_NAME.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, @@ -537,7 +537,7 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateStaticMemberType( DIB(cx), wrapper_struct_type_di_node, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, @@ -785,7 +785,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateMemberType( DIB(cx), enum_type_di_node, - field_name.as_ptr().cast(), + field_name.as_c_char_ptr(), field_name.len(), file_di_node, line_number, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs index fe1634146ff..4c848027b55 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/mod.rs @@ -13,7 +13,7 @@ use rustc_target::abi::{FieldIdx, TagEncoding, VariantIdx, Variants}; use super::type_map::{DINodeCreationResult, UniqueTypeId}; use super::{SmallVec, size_and_align_of}; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::debuginfo::metadata::type_map::{self, Stub}; use crate::debuginfo::metadata::{ UNKNOWN_LINE_NUMBER, build_field_di_node, build_generic_type_param_di_nodes, 
type_di_node, @@ -106,7 +106,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>( let value = [value as u64, (value >> 64) as u64]; Some(llvm::LLVMRustDIBuilderCreateEnumerator( DIB(cx), - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), value.as_ptr(), size.bits() as libc::c_uint, @@ -119,7 +119,7 @@ fn build_enumeration_type_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateEnumerationType( DIB(cx), containing_scope, - type_name.as_ptr().cast(), + type_name.as_c_char_ptr(), type_name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs index 5e7dbdd921a..b7400c5aeb2 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs @@ -10,7 +10,7 @@ use rustc_middle::ty::{self}; use rustc_target::abi::{Size, TagEncoding, VariantIdx, Variants}; use smallvec::smallvec; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::debuginfo::metadata::type_map::{self, Stub, StubInfo, UniqueTypeId}; use crate::debuginfo::metadata::{ DINodeCreationResult, NO_GENERICS, SmallVec, UNKNOWN_LINE_NUMBER, file_metadata, @@ -244,7 +244,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateVariantPart( DIB(cx), enum_type_di_node, - variant_part_name.as_ptr().cast(), + variant_part_name.as_c_char_ptr(), variant_part_name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, @@ -253,7 +253,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>( DIFlags::FlagZero, tag_member_di_node, create_DIArray(DIB(cx), &[]), - variant_part_unique_type_id_str.as_ptr().cast(), + variant_part_unique_type_id_str.as_c_char_ptr(), variant_part_unique_type_id_str.len(), ) }, @@ -327,7 +327,7 @@ fn build_discr_member_di_node<'ll, 'tcx>( Some(llvm::LLVMRustDIBuilderCreateMemberType( DIB(cx), containing_scope, - tag_name.as_ptr().cast(), + tag_name.as_c_char_ptr(), tag_name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, @@ -399,7 +399,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateVariantMemberType( DIB(cx), variant_part_di_node, - variant_member_info.variant_name.as_ptr().cast(), + variant_member_info.variant_name.as_c_char_ptr(), variant_member_info.variant_name.len(), file_di_node, line_number, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs index 714e3c0b145..d050dc9b406 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/type_map.rs @@ -9,7 +9,7 @@ use rustc_middle::ty::{ParamEnv, PolyExistentialTraitRef, Ty, TyCtxt}; use rustc_target::abi::{Align, Size, VariantIdx}; use super::{SmallVec, UNKNOWN_LINE_NUMBER, unknown_file_metadata}; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::debuginfo::utils::{DIB, create_DIArray, debug_context}; use crate::llvm::debuginfo::{DIFlags, DIScope, DIType}; use crate::llvm::{self}; @@ -191,7 +191,7 @@ pub(super) fn stub<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateStructType( DIB(cx), containing_scope, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, @@ -202,7 +202,7 @@ pub(super) fn stub<'ll, 'tcx>( empty_array, 0, vtable_holder, - unique_type_id_str.as_ptr().cast(), + 
unique_type_id_str.as_c_char_ptr(), unique_type_id_str.len(), ) } @@ -211,7 +211,7 @@ pub(super) fn stub<'ll, 'tcx>( llvm::LLVMRustDIBuilderCreateUnionType( DIB(cx), containing_scope, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), unknown_file_metadata(cx), UNKNOWN_LINE_NUMBER, @@ -220,7 +220,7 @@ pub(super) fn stub<'ll, 'tcx>( flags, Some(empty_array), 0, - unique_type_id_str.as_ptr().cast(), + unique_type_id_str.as_c_char_ptr(), unique_type_id_str.len(), ) }, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 3de4ca77e7d..dbc9074eea8 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -31,7 +31,7 @@ use self::namespace::mangled_name_of_instance; use self::utils::{DIB, create_DIArray, is_node_local_to_unit}; use crate::abi::FnAbi; use crate::builder::Builder; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::llvm; use crate::llvm::debuginfo::{ DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DILocation, DISPFlags, DIScope, DIType, @@ -389,9 +389,9 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { llvm::LLVMRustDIBuilderCreateMethod( DIB(self), containing_scope, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), - linkage_name.as_ptr().cast(), + linkage_name.as_c_char_ptr(), linkage_name.len(), file_metadata, loc.line, @@ -406,9 +406,9 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { llvm::LLVMRustDIBuilderCreateFunction( DIB(self), containing_scope, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), - linkage_name.as_ptr().cast(), + linkage_name.as_c_char_ptr(), linkage_name.len(), file_metadata, loc.line, @@ -494,7 +494,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( DIB(cx), None, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), actual_type_metadata, )) @@ -635,7 +635,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { DIB(self), dwarf_tag, scope_metadata, - name.as_ptr().cast(), + name.as_c_char_ptr(), name.len(), file_metadata, loc.line, diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs index 3578755aae0..33d9bc23890 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/namespace.rs @@ -5,7 +5,7 @@ use rustc_hir::def_id::DefId; use rustc_middle::ty::{self, Instance}; use super::utils::{DIB, debug_context}; -use crate::common::CodegenCx; +use crate::common::{AsCCharPtr, CodegenCx}; use crate::llvm; use crate::llvm::debuginfo::DIScope; @@ -36,7 +36,7 @@ pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'l llvm::LLVMRustDIBuilderCreateNameSpace( DIB(cx), parent_scope, - namespace_name_string.as_ptr().cast(), + namespace_name_string.as_c_char_ptr(), namespace_name_string.len(), false, // ExportSymbols (only relevant for C++ anonymous namespaces) ) diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 33258cb46fa..d338c848754 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -20,6 +20,7 @@ use smallvec::SmallVec; use tracing::debug; use crate::abi::{FnAbi, FnAbiLlvmExt}; +use crate::common::AsCCharPtr; use crate::context::CodegenCx; use 
crate::llvm::AttributePlace::Function; use crate::llvm::Visibility; @@ -41,7 +42,7 @@ fn declare_raw_fn<'ll>( ) -> &'ll Value { debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty); let llfn = unsafe { - llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty) + llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_c_char_ptr(), name.len(), ty) }; llvm::SetFunctionCallConv(llfn, callconv); @@ -68,7 +69,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// return its Value instead. pub(crate) fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value { debug!("declare_global(name={:?})", name); - unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) } + unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_c_char_ptr(), name.len(), ty) } } /// Declare a C ABI function. @@ -209,7 +210,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// Gets declared value by name. pub(crate) fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { debug!("get_declared_value(name={:?})", name); - unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) } + unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_c_char_ptr(), name.len()) } } /// Gets defined or externally defined (AvailableExternally linkage) value by diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 6aac2eea81d..cabe6c031d3 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -17,13 +17,13 @@ pub use self::IntPredicate::*; pub use self::Linkage::*; pub use self::MetadataType::*; pub use self::RealPredicate::*; +pub use self::ffi::*; +use crate::common::AsCCharPtr; pub mod archive_ro; pub mod diagnostic; mod ffi; -pub use self::ffi::*; - impl LLVMRustResult { pub fn into_result(self) -> Result<(), ()> { match self { @@ -53,9 +53,9 @@ pub fn CreateAttrStringValue<'ll>(llcx: &'ll Context, attr: &str, value: &str) - unsafe { LLVMCreateStringAttribute( llcx, - attr.as_ptr().cast(), + attr.as_c_char_ptr(), attr.len().try_into().unwrap(), - value.as_ptr().cast(), + value.as_c_char_ptr(), value.len().try_into().unwrap(), ) } @@ -65,7 +65,7 @@ pub fn CreateAttrString<'ll>(llcx: &'ll Context, attr: &str) -> &'ll Attribute { unsafe { LLVMCreateStringAttribute( llcx, - attr.as_ptr().cast(), + attr.as_c_char_ptr(), attr.len().try_into().unwrap(), std::ptr::null(), 0, @@ -294,7 +294,7 @@ pub fn get_value_name(value: &Value) -> &[u8] { /// Safe wrapper for `LLVMSetValueName2` from a byte slice pub fn set_value_name(value: &Value, name: &[u8]) { unsafe { - let data = name.as_ptr().cast(); + let data = name.as_c_char_ptr(); LLVMSetValueName2(value, data, name.len()); } } -- cgit 1.4.1-3-g733a5 From 82bfe053095be8f5b159c82b8161e39b351a1043 Mon Sep 17 00:00:00 2001 From: ChrisCho-H Date: Mon, 28 Oct 2024 20:16:35 +0900 Subject: refactor: cleaner check to return None --- compiler/rustc_codegen_llvm/src/llvm_util.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index aa38c02289d..9adb1299b3d 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -698,12 +698,9 @@ fn backend_feature_name<'a>(sess: &Session, s: &'a str) -> Option<&'a str> { let feature = s .strip_prefix(&['+', '-'][..]) .unwrap_or_else(|| 
sess.dcx().emit_fatal(InvalidTargetFeaturePrefix { feature: s })); - if s.is_empty() { - return None; - } // Rustc-specific feature requests like `+crt-static` or `-crt-static` // are not passed down to LLVM. - if RUSTC_SPECIFIC_FEATURES.contains(&feature) { + if s.is_empty() || RUSTC_SPECIFIC_FEATURES.contains(&feature) { return None; } Some(feature) -- cgit 1.4.1-3-g733a5 From 746b675c5aabc7a61443f16a37223720657544d2 Mon Sep 17 00:00:00 2001 From: klensy Date: Mon, 7 Oct 2024 22:22:51 +0300 Subject: fix clippy::clone_on_ref_ptr for compiler --- Cargo.lock | 1 + compiler/rustc_ast/src/token.rs | 2 +- compiler/rustc_ast_lowering/src/expr.rs | 21 +++++----- compiler/rustc_ast_lowering/src/lib.rs | 2 +- compiler/rustc_ast_lowering/src/path.rs | 2 +- compiler/rustc_ast_pretty/Cargo.toml | 1 + compiler/rustc_ast_pretty/src/pprust/state.rs | 3 +- compiler/rustc_borrowck/src/nll.rs | 4 +- compiler/rustc_borrowck/src/region_infer/mod.rs | 4 +- compiler/rustc_borrowck/src/type_check/mod.rs | 4 +- compiler/rustc_codegen_llvm/src/back/lto.rs | 2 +- compiler/rustc_codegen_ssa/src/back/write.rs | 4 +- compiler/rustc_codegen_ssa/src/base.rs | 6 +-- compiler/rustc_driver_impl/src/lib.rs | 2 +- .../src/annotate_snippet_emitter_writer.rs | 2 +- compiler/rustc_errors/src/emitter.rs | 15 +++++--- compiler/rustc_errors/src/json.rs | 4 +- compiler/rustc_expand/src/mbe/macro_parser.rs | 2 +- compiler/rustc_expand/src/mbe/transcribe.rs | 3 +- compiler/rustc_infer/src/infer/opaque_types/mod.rs | 2 +- compiler/rustc_interface/src/queries.rs | 2 +- compiler/rustc_metadata/src/rmeta/encoder.rs | 4 +- .../rustc_middle/src/middle/debugger_visualizer.rs | 2 +- compiler/rustc_middle/src/query/on_disk_cache.rs | 45 ++++++++++------------ compiler/rustc_parse/src/parser/diagnostics.rs | 3 +- compiler/rustc_query_system/src/dep_graph/graph.rs | 2 +- compiler/rustc_query_system/src/query/job.rs | 2 +- compiler/rustc_resolve/src/lib.rs | 6 +-- compiler/rustc_resolve/src/macros.rs | 2 +- compiler/rustc_session/src/parse.rs | 6 +-- compiler/rustc_session/src/session.rs | 5 ++- compiler/rustc_span/src/caching_source_map_view.rs | 14 +++---- compiler/rustc_span/src/source_map.rs | 12 +++--- 33 files changed, 99 insertions(+), 92 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/Cargo.lock b/Cargo.lock index 01e814e7d7f..cebd54822e7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3301,6 +3301,7 @@ version = "0.0.0" dependencies = [ "itertools", "rustc_ast", + "rustc_data_structures", "rustc_lexer", "rustc_span", "thin-vec", diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs index 1d6abbef06c..3b9edef0615 100644 --- a/compiler/rustc_ast/src/token.rs +++ b/compiler/rustc_ast/src/token.rs @@ -368,7 +368,7 @@ impl Clone for TokenKind { // a copy. This is faster than the `derive(Clone)` version which has a // separate path for every variant. 
match self { - Interpolated(nt) => Interpolated(nt.clone()), + Interpolated(nt) => Interpolated(Lrc::clone(nt)), _ => unsafe { std::ptr::read(self) }, } } diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index a1a16d0ca26..5b6b4eaa039 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -3,6 +3,7 @@ use std::assert_matches::assert_matches; use rustc_ast::ptr::P as AstP; use rustc_ast::*; use rustc_data_structures::stack::ensure_sufficient_stack; +use rustc_data_structures::sync::Lrc; use rustc_hir as hir; use rustc_hir::HirId; use rustc_hir::def::{DefKind, Res}; @@ -143,7 +144,7 @@ impl<'hir> LoweringContext<'_, 'hir> { ExprKind::IncludedBytes(bytes) => { let lit = self.arena.alloc(respan( self.lower_span(e.span), - LitKind::ByteStr(bytes.clone(), StrStyle::Cooked), + LitKind::ByteStr(Lrc::clone(bytes), StrStyle::Cooked), )); hir::ExprKind::Lit(lit) } @@ -521,7 +522,7 @@ impl<'hir> LoweringContext<'_, 'hir> { this.mark_span_with_reason( DesugaringKind::TryBlock, expr.span, - Some(this.allow_try_trait.clone()), + Some(Lrc::clone(&this.allow_try_trait)), ), expr, ) @@ -529,7 +530,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let try_span = this.mark_span_with_reason( DesugaringKind::TryBlock, this.tcx.sess.source_map().end_point(body.span), - Some(this.allow_try_trait.clone()), + Some(Lrc::clone(&this.allow_try_trait)), ); (try_span, this.expr_unit(try_span)) @@ -638,7 +639,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let unstable_span = self.mark_span_with_reason( DesugaringKind::Async, self.lower_span(span), - Some(self.allow_gen_future.clone()), + Some(Lrc::clone(&self.allow_gen_future)), ); let resume_ty = self.make_lang_item_qpath(hir::LangItem::ResumeTy, unstable_span, None); @@ -724,7 +725,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let unstable_span = self.mark_span_with_reason( DesugaringKind::Async, span, - Some(self.allow_gen_future.clone()), + Some(Lrc::clone(&self.allow_gen_future)), ); self.lower_attrs(inner_hir_id, &[Attribute { kind: AttrKind::Normal(ptr::P(NormalAttr::from_ident(Ident::new( @@ -800,13 +801,13 @@ impl<'hir> LoweringContext<'_, 'hir> { let features = match await_kind { FutureKind::Future => None, - FutureKind::AsyncIterator => Some(self.allow_for_await.clone()), + FutureKind::AsyncIterator => Some(Lrc::clone(&self.allow_for_await)), }; let span = self.mark_span_with_reason(DesugaringKind::Await, await_kw_span, features); let gen_future_span = self.mark_span_with_reason( DesugaringKind::Await, full_span, - Some(self.allow_gen_future.clone()), + Some(Lrc::clone(&self.allow_gen_future)), ); let expr_hir_id = expr.hir_id; @@ -1812,13 +1813,13 @@ impl<'hir> LoweringContext<'_, 'hir> { let unstable_span = self.mark_span_with_reason( DesugaringKind::QuestionMark, span, - Some(self.allow_try_trait.clone()), + Some(Lrc::clone(&self.allow_try_trait)), ); let try_span = self.tcx.sess.source_map().end_point(span); let try_span = self.mark_span_with_reason( DesugaringKind::QuestionMark, try_span, - Some(self.allow_try_trait.clone()), + Some(Lrc::clone(&self.allow_try_trait)), ); // `Try::branch()` @@ -1912,7 +1913,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let unstable_span = self.mark_span_with_reason( DesugaringKind::YeetExpr, span, - Some(self.allow_try_trait.clone()), + Some(Lrc::clone(&self.allow_try_trait)), ); let from_yeet_expr = self.wrap_in_try_constructor( diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index 
34b8137aea8..15221b3eba0 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -1865,7 +1865,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { CoroutineKind::Async { return_impl_trait_id, .. } => (return_impl_trait_id, None), CoroutineKind::Gen { return_impl_trait_id, .. } => (return_impl_trait_id, None), CoroutineKind::AsyncGen { return_impl_trait_id, .. } => { - (return_impl_trait_id, Some(self.allow_async_iterator.clone())) + (return_impl_trait_id, Some(Lrc::clone(&self.allow_async_iterator))) } }; diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs index 258655de486..67159068916 100644 --- a/compiler/rustc_ast_lowering/src/path.rs +++ b/compiler/rustc_ast_lowering/src/path.rs @@ -70,7 +70,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let bound_modifier_allowed_features = if let Res::Def(DefKind::Trait, async_def_id) = res && self.tcx.async_fn_trait_kind_from_def_id(async_def_id).is_some() { - Some(self.allow_async_fn_traits.clone()) + Some(Lrc::clone(&self.allow_async_fn_traits)) } else { None }; diff --git a/compiler/rustc_ast_pretty/Cargo.toml b/compiler/rustc_ast_pretty/Cargo.toml index 9ae5c9b3cec..f290fedcd8b 100644 --- a/compiler/rustc_ast_pretty/Cargo.toml +++ b/compiler/rustc_ast_pretty/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" # tidy-alphabetical-start itertools = "0.12" rustc_ast = { path = "../rustc_ast" } +rustc_data_structures = { path = "../rustc_data_structures" } rustc_lexer = { path = "../rustc_lexer" } rustc_span = { path = "../rustc_span" } thin-vec = "0.2.12" diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs index 1e18f0779f0..de9f5187be7 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state.rs @@ -21,6 +21,7 @@ use rustc_ast::{ GenericBound, InlineAsmOperand, InlineAsmOptions, InlineAsmRegOrRegClass, InlineAsmTemplatePiece, PatKind, RangeEnd, RangeSyntax, Safety, SelfKind, Term, attr, }; +use rustc_data_structures::sync::Lrc; use rustc_span::edition::Edition; use rustc_span::source_map::{SourceMap, Spanned}; use rustc_span::symbol::{Ident, IdentPrinter, Symbol, kw, sym}; @@ -105,7 +106,7 @@ fn split_block_comment_into_lines(text: &str, col: CharPos) -> Vec<String> { fn gather_comments(sm: &SourceMap, path: FileName, src: String) -> Vec<Comment> { let sm = SourceMap::new(sm.path_mapping().clone()); let source_file = sm.new_source_file(path, src); - let text = (*source_file.src.as_ref().unwrap()).clone(); + let text = Lrc::clone(&(*source_file.src.as_ref().unwrap())); let text: &str = text.as_str(); let start_bpos = source_file.start_pos; diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs index 3674053409b..f76603d5679 100644 --- a/compiler/rustc_borrowck/src/nll.rs +++ b/compiler/rustc_borrowck/src/nll.rs @@ -107,13 +107,13 @@ pub(crate) fn compute_regions<'a, 'tcx>( param_env, body, promoted, - universal_regions.clone(), + Rc::clone(&universal_regions), location_table, borrow_set, &mut all_facts, flow_inits, move_data, - elements.clone(), + Rc::clone(&elements), upvars, ); diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index e85f529bf0e..d5c2796932e 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -733,7 +733,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } // Now take member constraints into account.
- let member_constraints = self.member_constraints.clone(); + let member_constraints = Rc::clone(&self.member_constraints); for m_c_i in member_constraints.indices(scc_a) { self.apply_member_constraint(scc_a, m_c_i, member_constraints.choice_regions(m_c_i)); } @@ -1679,7 +1679,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { infcx: &InferCtxt<'tcx>, errors_buffer: &mut RegionErrors<'tcx>, ) { - let member_constraints = self.member_constraints.clone(); + let member_constraints = Rc::clone(&self.member_constraints); for m_c_i in member_constraints.all_indices() { debug!(?m_c_i); let m_c = &member_constraints[m_c_i]; diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index a544d88e832..26919bfd488 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -134,7 +134,7 @@ pub(crate) fn type_check<'a, 'tcx>( let mut constraints = MirTypeckRegionConstraints { placeholder_indices: PlaceholderIndices::default(), placeholder_index_to_region: IndexVec::default(), - liveness_constraints: LivenessValues::with_specific_points(elements.clone()), + liveness_constraints: LivenessValues::with_specific_points(Rc::clone(&elements)), outlives_constraints: OutlivesConstraintSet::default(), member_constraints: MemberConstraintSet::default(), type_tests: Vec::default(), @@ -150,7 +150,7 @@ pub(crate) fn type_check<'a, 'tcx>( infcx, param_env, implicit_region_bound, - universal_regions.clone(), + Rc::clone(&universal_regions), &mut constraints, ); diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 1f7a923dd2c..74bc0eced32 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -569,7 +569,7 @@ fn thin_lto( info!(" - {}: re-compiled", module_name); opt_jobs.push(LtoModuleCodegen::Thin(ThinModule { - shared: shared.clone(), + shared: Arc::clone(&shared), idx: module_index, })); } diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index e3d11cfaf4f..8445d16befb 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -514,7 +514,7 @@ pub(crate) fn start_async_codegen<B: ExtraBackendMethods>( future: Some(coordinator_thread), phantom: PhantomData, }, - output_filenames: tcx.output_filenames(()).clone(), + output_filenames: Arc::clone(tcx.output_filenames(())), } } @@ -1203,7 +1203,7 @@ fn start_executing_work<B: ExtraBackendMethods>( coordinator_send, expanded_args: tcx.sess.expanded_args.clone(), diag_emitter: shared_emitter.clone(), - output_filenames: tcx.output_filenames(()).clone(), + output_filenames: Arc::clone(tcx.output_filenames(())), regular_module_config: regular_config, metadata_module_config: metadata_config, allocator_module_config: allocator_config, diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index a726ee73aaa..b954633f453 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -7,7 +7,7 @@ use rustc_ast::expand::allocator::{ALLOCATOR_METHODS, AllocatorKind, global_fn_n use rustc_attr as attr; use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; use rustc_data_structures::profiling::{get_resident_set_size, print_time_passes_entry}; -use rustc_data_structures::sync::par_map; +use rustc_data_structures::sync::{Lrc, par_map}; use rustc_hir::def_id::{DefId, LOCAL_CRATE}; use
rustc_hir::lang_items::LangItem; @@ -923,7 +923,7 @@ impl CrateInfo { crate_name: UnordMap::with_capacity(n_crates), used_crates, used_crate_source: UnordMap::with_capacity(n_crates), - dependency_formats: tcx.dependency_formats(()).clone(), + dependency_formats: Lrc::clone(tcx.dependency_formats(())), windows_subsystem, natvis_debugger_visualizers: Default::default(), }; @@ -936,7 +936,7 @@ impl CrateInfo { info.crate_name.insert(cnum, tcx.crate_name(cnum)); let used_crate_source = tcx.used_crate_source(cnum); - info.used_crate_source.insert(cnum, used_crate_source.clone()); + info.used_crate_source.insert(cnum, Lrc::clone(used_crate_source)); if tcx.is_profiler_runtime(cnum) { info.profiler_runtime = Some(cnum); } diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index a59dea557bb..e2585c02388 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -1395,7 +1395,7 @@ pub fn install_ice_hook( } let using_internal_features = Arc::new(std::sync::atomic::AtomicBool::default()); - let using_internal_features_hook = using_internal_features.clone(); + let using_internal_features_hook = Arc::clone(&using_internal_features); panic::update_hook(Box::new( move |default_hook: &(dyn Fn(&PanicHookInfo<'_>) + Send + Sync + 'static), info: &PanicHookInfo<'_>| { diff --git a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs index 335432c9cfe..b4a651b10b1 100644 --- a/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs +++ b/compiler/rustc_errors/src/annotate_snippet_emitter_writer.rs @@ -173,7 +173,7 @@ impl AnnotateSnippetEmitter { source_map.ensure_source_file_source_present(&file); ( format!("{}", source_map.filename_for_diagnostics(&file.name)), - source_string(file.clone(), &line), + source_string(Lrc::clone(&file), &line), line.line_index, line.annotations, ) diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs index 0ccc71ae06c..6552cf224ea 100644 --- a/compiler/rustc_errors/src/emitter.rs +++ b/compiler/rustc_errors/src/emitter.rs @@ -1555,7 +1555,7 @@ impl HumanEmitter { // Get the left-side margin to remove it let mut whitespace_margin = usize::MAX; for line_idx in 0..annotated_file.lines.len() { - let file = annotated_file.file.clone(); + let file = Lrc::clone(&annotated_file.file); let line = &annotated_file.lines[line_idx]; if let Some(source_string) = line.line_index.checked_sub(1).and_then(|l| file.get_line(l)) @@ -1646,7 +1646,7 @@ impl HumanEmitter { let depths = self.render_source_line( &mut buffer, - annotated_file.file.clone(), + Lrc::clone(&annotated_file.file), &annotated_file.lines[line_idx], width_offset, code_offset, @@ -2529,7 +2529,12 @@ impl FileWithAnnotatedLines { // | | | // | |______foo // | baz - add_annotation_to_file(&mut output, file.clone(), ann.line_start, ann.as_start()); + add_annotation_to_file( + &mut output, + Lrc::clone(&file), + ann.line_start, + ann.as_start(), + ); // 4 is the minimum vertical length of a multiline span when presented: two lines // of code and two lines of underline. This is not true for the special case where // the beginning doesn't have an underline, but the current logic seems to be @@ -2545,11 +2550,11 @@ impl FileWithAnnotatedLines { .unwrap_or(ann.line_start); for line in ann.line_start + 1..until { // Every `|` that joins the beginning of the span (`___^`) to the end (`|__^`). 
- add_annotation_to_file(&mut output, file.clone(), line, ann.as_line()); + add_annotation_to_file(&mut output, Lrc::clone(&file), line, ann.as_line()); } let line_end = ann.line_end - 1; if middle < line_end { - add_annotation_to_file(&mut output, file.clone(), line_end, ann.as_line()); + add_annotation_to_file(&mut output, Lrc::clone(&file), line_end, ann.as_line()); } } else { end_ann.annotation_type = AnnotationType::Singleline; diff --git a/compiler/rustc_errors/src/json.rs b/compiler/rustc_errors/src/json.rs index 1534e256520..91e2b9996cd 100644 --- a/compiler/rustc_errors/src/json.rs +++ b/compiler/rustc_errors/src/json.rs @@ -367,9 +367,9 @@ impl Diagnostic { ColorConfig::Always | ColorConfig::Auto => dst = Box::new(termcolor::Ansi::new(dst)), ColorConfig::Never => {} } - HumanEmitter::new(dst, je.fallback_bundle.clone()) + HumanEmitter::new(dst, Lrc::clone(&je.fallback_bundle)) .short_message(short) - .sm(Some(je.sm.clone())) + .sm(Some(Lrc::clone(&je.sm))) .fluent_bundle(je.fluent_bundle.clone()) .diagnostic_width(je.diagnostic_width) .macro_backtrace(je.macro_backtrace) diff --git a/compiler/rustc_expand/src/mbe/macro_parser.rs b/compiler/rustc_expand/src/mbe/macro_parser.rs index 3903203da3d..a08db612bbe 100644 --- a/compiler/rustc_expand/src/mbe/macro_parser.rs +++ b/compiler/rustc_expand/src/mbe/macro_parser.rs @@ -622,7 +622,7 @@ impl TtParser { // possible next positions into `next_mps`. After some post-processing, the contents of // `next_mps` replenish `cur_mps` and we start over again. self.cur_mps.clear(); - self.cur_mps.push(MatcherPos { idx: 0, matches: self.empty_matches.clone() }); + self.cur_mps.push(MatcherPos { idx: 0, matches: Rc::clone(&self.empty_matches) }); loop { self.next_mps.clear(); diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs index 34811ca2b35..b77d02e630a 100644 --- a/compiler/rustc_expand/src/mbe/transcribe.rs +++ b/compiler/rustc_expand/src/mbe/transcribe.rs @@ -5,6 +5,7 @@ use rustc_ast::mut_visit::{self, MutVisitor}; use rustc_ast::token::{self, Delimiter, IdentIsRaw, Lit, LitKind, Nonterminal, Token, TokenKind}; use rustc_ast::tokenstream::{DelimSpacing, DelimSpan, Spacing, TokenStream, TokenTree}; use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::Lrc; use rustc_errors::{Diag, DiagCtxtHandle, PResult, pluralize}; use rustc_parse::lexer::nfc_normalize; use rustc_parse::parser::ParseNtResult; @@ -293,7 +294,7 @@ pub(super) fn transcribe<'a>( // `Delimiter::Invisible` to maintain parsing priorities. // `Interpolated` is currently used for such groups in rustc parser. marker.visit_span(&mut sp); - TokenTree::token_alone(token::Interpolated(nt.clone()), sp) + TokenTree::token_alone(token::Interpolated(Lrc::clone(nt)), sp) } MatchedSeq(..) => { // We were unable to descend far enough. This is an error. 
diff --git a/compiler/rustc_infer/src/infer/opaque_types/mod.rs b/compiler/rustc_infer/src/infer/opaque_types/mod.rs index 853ae6d2641..c38b7114e43 100644 --- a/compiler/rustc_infer/src/infer/opaque_types/mod.rs +++ b/compiler/rustc_infer/src/infer/opaque_types/mod.rs @@ -364,7 +364,7 @@ impl<'tcx> InferCtxt<'tcx> { span, concrete_ty, r, - choice_regions.clone(), + Lrc::clone(&choice_regions), ) }, }); diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs index 3a1833452d4..b6837ec764f 100644 --- a/compiler/rustc_interface/src/queries.rs +++ b/compiler/rustc_interface/src/queries.rs @@ -142,7 +142,7 @@ impl Linker { Ok(Linker { dep_graph: tcx.dep_graph.clone(), - output_filenames: tcx.output_filenames(()).clone(), + output_filenames: Arc::clone(tcx.output_filenames(())), crate_hash: if tcx.needs_crate_hash() { Some(tcx.crate_hash(LOCAL_CRATE)) } else { diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index 47f7a8b7c20..b5ac302c597 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -278,7 +278,7 @@ impl<'a, 'tcx> Encodable<EncodeContext<'a, 'tcx>> for SpanData { let source_map = s.tcx.sess.source_map(); let source_file_index = source_map.lookup_source_file_idx(self.lo); s.source_file_cache = - (source_map.files()[source_file_index].clone(), source_file_index); + (Lrc::clone(&source_map.files()[source_file_index]), source_file_index); } let (ref source_file, source_file_index) = s.source_file_cache; debug_assert!(source_file.contains(self.lo)); @@ -2275,7 +2275,7 @@ pub fn encode_metadata(tcx: TyCtxt<'_>, path: &Path) { encoder.emit_raw_bytes(&0u64.to_le_bytes()); let source_map_files = tcx.sess.source_map().files(); - let source_file_cache = (source_map_files[0].clone(), 0); + let source_file_cache = (Lrc::clone(&source_map_files[0]), 0); let required_source_files = Some(FxIndexSet::default()); drop(source_map_files); diff --git a/compiler/rustc_middle/src/middle/debugger_visualizer.rs b/compiler/rustc_middle/src/middle/debugger_visualizer.rs index 615a7402139..c241c06fce4 100644 --- a/compiler/rustc_middle/src/middle/debugger_visualizer.rs +++ b/compiler/rustc_middle/src/middle/debugger_visualizer.rs @@ -32,7 +32,7 @@ impl DebuggerVisualizerFile { pub fn path_erased(&self) -> Self { DebuggerVisualizerFile { - src: self.src.clone(), + src: Lrc::clone(&self.src), visualizer_type: self.visualizer_type, path: None, } diff --git a/compiler/rustc_middle/src/query/on_disk_cache.rs b/compiler/rustc_middle/src/query/on_disk_cache.rs index 35a16684615..8b77a4a81ca 100644 --- a/compiler/rustc_middle/src/query/on_disk_cache.rs +++ b/compiler/rustc_middle/src/query/on_disk_cache.rs @@ -472,32 +472,27 @@ impl<'a, 'tcx> CacheDecoder<'a, 'tcx> { let CacheDecoder { tcx, file_index_to_file, file_index_to_stable_id, source_map, .. } = *self; - file_index_to_file - .borrow_mut() - .entry(index) - .or_insert_with(|| { - let source_file_id = &file_index_to_stable_id[&index]; - let source_file_cnum = - tcx.stable_crate_id_to_crate_num(source_file_id.stable_crate_id); - - // If this `SourceFile` is from a foreign crate, then make sure - // that we've imported all of the source files from that crate. - // This has usually already been done during macro invocation. - // However, when encoding query results like `TypeckResults`, - // we might encode an `AdtDef` for a foreign type (because it - // was referenced in the body of the function).
There is no guarantee - // that we will load the source files from that crate during macro - // expansion, so we use `import_source_files` to ensure that the foreign - // source files are actually imported before we call `source_file_by_stable_id`. - if source_file_cnum != LOCAL_CRATE { - self.tcx.import_source_files(source_file_cnum); - } + Lrc::clone(file_index_to_file.borrow_mut().entry(index).or_insert_with(|| { + let source_file_id = &file_index_to_stable_id[&index]; + let source_file_cnum = tcx.stable_crate_id_to_crate_num(source_file_id.stable_crate_id); + + // If this `SourceFile` is from a foreign crate, then make sure + // that we've imported all of the source files from that crate. + // This has usually already been done during macro invocation. + // However, when encoding query results like `TypeckResults`, + // we might encode an `AdtDef` for a foreign type (because it + // was referenced in the body of the function). There is no guarantee + // that we will load the source files from that crate during macro + // expansion, so we use `import_source_files` to ensure that the foreign + // source files are actually imported before we call `source_file_by_stable_id`. + if source_file_cnum != LOCAL_CRATE { + self.tcx.import_source_files(source_file_cnum); + } - source_map - .source_file_by_stable_id(source_file_id.stable_source_file_id) - .expect("failed to lookup `SourceFile` in new context") - }) - .clone() + source_map + .source_file_by_stable_id(source_file_id.stable_source_file_id) + .expect("failed to lookup `SourceFile` in new context") + })) } } diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index a9384501547..a1fe5508970 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -14,6 +14,7 @@ use rustc_ast::{ }; use rustc_ast_pretty::pprust; use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::sync::Lrc; use rustc_errors::{ Applicability, Diag, DiagCtxtHandle, ErrorGuaranteed, FatalError, PErr, PResult, Subdiagnostic, Suggestions, pluralize, @@ -2437,7 +2438,7 @@ impl<'a> Parser<'a> { let mut labels = vec![]; while let TokenKind::Interpolated(nt) = &tok.kind { let tokens = nt.tokens(); - labels.push(nt.clone()); + labels.push(Lrc::clone(nt)); if let Some(tokens) = tokens && let tokens = tokens.to_attr_token_stream() && let tokens = tokens.0.deref() diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index cd7725d2d70..5e30f17d626 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -134,7 +134,7 @@ impl DepGraph { encoder, record_graph, record_stats, - prev_graph.clone(), + Arc::clone(&prev_graph), ); let colors = DepNodeColorMap::new(prev_graph_node_count); diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index ca3efc11201..5af41b9e687 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -237,7 +237,7 @@ impl QueryLatch { // the `wait` call below, by 1) the `set` method or 2) by deadlock detection. // Both of these will remove it from the `waiters` list before resuming // this thread. - info.waiters.push(waiter.clone()); + info.waiters.push(Arc::clone(waiter)); // If this detects a deadlock and the deadlock handler wants to resume this thread // we have to be in the `wait` call. 
This is ensured by the deadlock handler diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index 35d491cfc18..9abb3180388 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -1694,9 +1694,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { fn dummy_ext(&self, macro_kind: MacroKind) -> Lrc { match macro_kind { - MacroKind::Bang => self.dummy_ext_bang.clone(), - MacroKind::Derive => self.dummy_ext_derive.clone(), - MacroKind::Attr => self.non_macro_attr.ext.clone(), + MacroKind::Bang => Lrc::clone(&self.dummy_ext_bang), + MacroKind::Derive => Lrc::clone(&self.dummy_ext_derive), + MacroKind::Attr => Lrc::clone(&self.non_macro_attr.ext), } } diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs index a9ebffea8a1..1a655abaf16 100644 --- a/compiler/rustc_resolve/src/macros.rs +++ b/compiler/rustc_resolve/src/macros.rs @@ -826,7 +826,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } _ => None, }, - None => self.get_macro(res).map(|macro_data| macro_data.ext.clone()), + None => self.get_macro(res).map(|macro_data| Lrc::clone(¯o_data.ext)), }; Ok((ext, res)) } diff --git a/compiler/rustc_session/src/parse.rs b/compiler/rustc_session/src/parse.rs index 3d44810e7dd..f20ae85b8e8 100644 --- a/compiler/rustc_session/src/parse.rs +++ b/compiler/rustc_session/src/parse.rs @@ -241,7 +241,7 @@ impl ParseSess { let sm = Lrc::new(SourceMap::new(FilePathMapping::empty())); let emitter = Box::new( HumanEmitter::new(stderr_destination(ColorConfig::Auto), fallback_bundle) - .sm(Some(sm.clone())), + .sm(Some(Lrc::clone(&sm))), ); let dcx = DiagCtxt::new(emitter); ParseSess::with_dcx(dcx, sm) @@ -278,7 +278,7 @@ impl ParseSess { let sm = Lrc::new(SourceMap::new(FilePathMapping::empty())); let emitter = Box::new(HumanEmitter::new( stderr_destination(ColorConfig::Auto), - fallback_bundle.clone(), + Lrc::clone(&fallback_bundle), )); let fatal_dcx = DiagCtxt::new(emitter); let dcx = DiagCtxt::new(Box::new(SilentEmitter { @@ -297,7 +297,7 @@ impl ParseSess { } pub fn clone_source_map(&self) -> Lrc { - self.source_map.clone() + Lrc::clone(&self.source_map) } pub fn buffer_lint( diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 1963cf4eb7c..45434534c75 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -1036,7 +1036,8 @@ pub fn build_session( sopts.unstable_opts.translate_directionality_markers, ); let source_map = rustc_span::source_map::get_source_map().unwrap(); - let emitter = default_emitter(&sopts, registry, source_map.clone(), bundle, fallback_bundle); + let emitter = + default_emitter(&sopts, registry, Lrc::clone(&source_map), bundle, fallback_bundle); let mut dcx = DiagCtxt::new(emitter).with_flags(sopts.unstable_opts.dcx_flags(can_emit_warnings)); @@ -1079,7 +1080,7 @@ pub fn build_session( let target_tlib_path = if host_triple == target_triple { // Use the same `SearchPath` if host and target triple are identical to avoid unnecessary // rescanning of the target lib path and an unnecessary allocation. 
- host_tlib_path.clone() + Lrc::clone(&host_tlib_path) } else { Lrc::new(SearchPath::from_sysroot_and_triple(&sysroot, target_triple)) }; diff --git a/compiler/rustc_span/src/caching_source_map_view.rs b/compiler/rustc_span/src/caching_source_map_view.rs index 26aa782e5d7..9f977b98b82 100644 --- a/compiler/rustc_span/src/caching_source_map_view.rs +++ b/compiler/rustc_span/src/caching_source_map_view.rs @@ -63,7 +63,7 @@ pub struct CachingSourceMapView<'sm> { impl<'sm> CachingSourceMapView<'sm> { pub fn new(source_map: &'sm SourceMap) -> CachingSourceMapView<'sm> { let files = source_map.files(); - let first_file = files[0].clone(); + let first_file = Lrc::clone(&files[0]); let entry = CacheEntry { time_stamp: 0, line_number: 0, @@ -92,7 +92,7 @@ impl<'sm> CachingSourceMapView<'sm> { cache_entry.touch(self.time_stamp); let col = RelativeBytePos(pos.to_u32() - cache_entry.line.start.to_u32()); - return Some((cache_entry.file.clone(), cache_entry.line_number, col)); + return Some((Lrc::clone(&cache_entry.file), cache_entry.line_number, col)); } // No cache hit ... @@ -109,7 +109,7 @@ impl<'sm> CachingSourceMapView<'sm> { cache_entry.update(new_file_and_idx, pos, self.time_stamp); let col = RelativeBytePos(pos.to_u32() - cache_entry.line.start.to_u32()); - Some((cache_entry.file.clone(), cache_entry.line_number, col)) + Some((Lrc::clone(&cache_entry.file), cache_entry.line_number, col)) } pub fn span_data_to_lines_and_cols( @@ -133,7 +133,7 @@ impl<'sm> CachingSourceMapView<'sm> { } ( - lo.file.clone(), + Lrc::clone(&lo.file), lo.line_number, span_data.lo - lo.line.start, hi.line_number, @@ -181,7 +181,7 @@ impl<'sm> CachingSourceMapView<'sm> { lo.update(new_file_and_idx, span_data.lo, self.time_stamp); if !lo.line.contains(&span_data.hi) { - let new_file_and_idx = Some((lo.file.clone(), lo.file_index)); + let new_file_and_idx = Some((Lrc::clone(&lo.file), lo.file_index)); let next_oldest = self.oldest_cache_entry_index_avoid(oldest); let hi = &mut self.line_cache[next_oldest]; hi.update(new_file_and_idx, span_data.hi, self.time_stamp); @@ -227,7 +227,7 @@ impl<'sm> CachingSourceMapView<'sm> { assert_eq!(lo.file_index, hi.file_index); Some(( - lo.file.clone(), + Lrc::clone(&lo.file), lo.line_number, span_data.lo - lo.line.start, hi.line_number, @@ -277,7 +277,7 @@ impl<'sm> CachingSourceMapView<'sm> { let file = &self.source_map.files()[file_idx]; if file_contains(file, pos) { - return Some((file.clone(), file_idx)); + return Some((Lrc::clone(file), file_idx)); } } diff --git a/compiler/rustc_span/src/source_map.rs b/compiler/rustc_span/src/source_map.rs index 8a023305937..f36bb38623e 100644 --- a/compiler/rustc_span/src/source_map.rs +++ b/compiler/rustc_span/src/source_map.rs @@ -286,8 +286,8 @@ impl SourceMap { }); let file = Lrc::new(file); - files.source_files.push(file.clone()); - files.stable_id_to_source_file.insert(file_id, file.clone()); + files.source_files.push(Lrc::clone(&file)); + files.stable_id_to_source_file.insert(file_id, Lrc::clone(&file)); Ok(file) } @@ -386,7 +386,7 @@ impl SourceMap { /// Return the SourceFile that contains the given `BytePos` pub fn lookup_source_file(&self, pos: BytePos) -> Lrc { let idx = self.lookup_source_file_idx(pos); - (*self.files.borrow().source_files)[idx].clone() + Lrc::clone(&(*self.files.borrow().source_files)[idx]) } /// Looks up source information about a `BytePos`. 
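The hunks in this commit all make the same mechanical change: `x.clone()` on a reference-counted pointer becomes the fully qualified `Lrc::clone(&x)` (or `Arc::clone(&x)`), so a reader can tell at the call site that only a reference count is incremented and nothing is deep-copied. This is the style enforced by Clippy's `clone_on_ref_ptr` lint. A minimal, self-contained illustration using `std::sync::Arc` directly (rustc's `Lrc` is an alias for it):

    use std::sync::Arc;

    fn main() {
        let file = Arc::new(String::from("source text"));

        // Method syntax: compiles fine, but reads like a deep copy of the String.
        let a = file.clone();

        // Fully qualified syntax: unambiguously a cheap refcount increment.
        let b = Arc::clone(&file);

        assert_eq!(Arc::strong_count(&file), 3);
        assert!(Arc::ptr_eq(&a, &b)); // all three handles share one allocation
    }

Both spellings compile to identical code; the change is purely about readability at call sites that would otherwise look like expensive clones.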
@@ -468,7 +468,7 @@ impl SourceMap { if lo != hi { return true; } - let f = (*self.files.borrow().source_files)[lo].clone(); + let f = Lrc::clone(&(*self.files.borrow().source_files)[lo]); let lo = f.relative_position(sp.lo()); let hi = f.relative_position(sp.hi()); f.lookup_line(lo) != f.lookup_line(hi) @@ -994,7 +994,7 @@ impl SourceMap { let filename = self.path_mapping().map_filename_prefix(filename).0; for sf in self.files.borrow().source_files.iter() { if filename == sf.name { - return Some(sf.clone()); + return Some(Lrc::clone(&sf)); } } None @@ -1003,7 +1003,7 @@ impl SourceMap { /// For a global `BytePos`, computes the local offset within the containing `SourceFile`. pub fn lookup_byte_offset(&self, bpos: BytePos) -> SourceFileAndBytePos { let idx = self.lookup_source_file_idx(bpos); - let sf = (*self.files.borrow().source_files)[idx].clone(); + let sf = Lrc::clone(&(*self.files.borrow().source_files)[idx]); let offset = bpos - sf.start_pos; SourceFileAndBytePos { sf, pos: offset } } -- cgit 1.4.1-3-g733a5 From 88a9edc091fad78a8e3784fe656e369417bb6acc Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Sun, 27 Oct 2024 21:34:49 -0700 Subject: compiler: Add `is_uninhabited` and use LayoutS accessors This reduces the need of the compiler to peek on the fields of LayoutS. --- Cargo.lock | 1 + compiler/rustc_abi/src/layout.rs | 4 ++-- compiler/rustc_abi/src/lib.rs | 5 +++++ compiler/rustc_codegen_llvm/src/abi.rs | 4 ++-- compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 2 +- compiler/rustc_codegen_ssa/src/mir/block.rs | 4 ++-- compiler/rustc_codegen_ssa/src/mir/place.rs | 6 +++--- compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 8 ++++---- compiler/rustc_const_eval/src/const_eval/machine.rs | 2 +- compiler/rustc_const_eval/src/interpret/discriminant.rs | 6 +++--- compiler/rustc_const_eval/src/interpret/intrinsics.rs | 2 +- compiler/rustc_const_eval/src/interpret/operator.rs | 6 +++--- compiler/rustc_const_eval/src/interpret/validity.rs | 6 +++--- compiler/rustc_const_eval/src/util/check_validity_requirement.rs | 2 +- compiler/rustc_hir_analysis/src/check/check.rs | 2 +- compiler/rustc_middle/src/ty/layout.rs | 2 +- compiler/rustc_mir_transform/Cargo.toml | 1 + compiler/rustc_mir_transform/src/unreachable_enum_branching.rs | 4 ++-- compiler/rustc_transmute/src/layout/tree.rs | 4 +--- compiler/rustc_ty_utils/src/layout/invariant.rs | 6 ++---- 20 files changed, 40 insertions(+), 37 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/Cargo.lock b/Cargo.lock index 01e814e7d7f..763fa043e47 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4095,6 +4095,7 @@ version = "0.0.0" dependencies = [ "either", "itertools", + "rustc_abi", "rustc_arena", "rustc_ast", "rustc_attr", diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 0340d1bd6bc..81fe31619c3 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -28,7 +28,7 @@ where VariantIdx: Idx, F: Deref> + fmt::Debug, { - let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited()); + let uninhabited = fields.iter().any(|f| f.is_uninhabited()); // We cannot ignore alignment; that might lead us to entirely discard a variant and // produce an enum that is less aligned than it should be! 
let is_1zst = fields.iter().all(|f| f.is_1zst()); @@ -681,7 +681,7 @@ impl LayoutCalculator { let discr_type = repr.discr_type(); let bits = Integer::from_attr(dl, discr_type).size().bits(); for (i, mut val) in discriminants { - if !repr.c() && variants[i].iter().any(|f| f.abi.is_uninhabited()) { + if !repr.c() && variants[i].iter().any(|f| f.is_uninhabited()) { continue; } if discr_type.is_signed() { diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 8e90130da4c..17dd6c9f443 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -1652,6 +1652,11 @@ impl LayoutS { } } + /// Returns `true` if this is an uninhabited type + pub fn is_uninhabited(&self) -> bool { + self.abi.is_uninhabited() + } + pub fn scalar(cx: &C, scalar: Scalar) -> Self { let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar); let size = scalar.size(cx); diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 2fe5ed32daa..8a1ee48c43c 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -415,7 +415,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { instance: Option>, ) { let mut func_attrs = SmallVec::<[_; 3]>::new(); - if self.ret.layout.abi.is_uninhabited() { + if self.ret.layout.is_uninhabited() { func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(cx.llcx)); } if !self.can_unwind { @@ -532,7 +532,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { fn apply_attrs_callsite(&self, bx: &mut Builder<'_, 'll, 'tcx>, callsite: &'ll Value) { let mut func_attrs = SmallVec::<[_; 2]>::new(); - if self.ret.layout.abi.is_uninhabited() { + if self.ret.layout.is_uninhabited() { func_attrs.push(llvm::AttributeKind::NoReturn.create_attr(bx.cx.llcx)); } if !self.can_unwind { diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 3de4ca77e7d..5b32a542750 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -364,7 +364,7 @@ impl<'ll, 'tcx> DebugInfoCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { let mut flags = DIFlags::FlagPrototyped; - if fn_abi.ret.layout.abi.is_uninhabited() { + if fn_abi.ret.layout.is_uninhabited() { flags |= DIFlags::FlagNoReturn; } diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index e3553dc03e1..a17a127f014 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -438,7 +438,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { _ => bug!("C-variadic function must have a `VaList` place"), } } - if self.fn_abi.ret.layout.abi.is_uninhabited() { + if self.fn_abi.ret.layout.is_uninhabited() { // Functions with uninhabited return values are marked `noreturn`, // so we should make sure that we never actually do. // We play it safe by using a well-defined `abort`, but we could go for immediate UB @@ -774,7 +774,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Some(if do_panic { let msg_str = with_no_visible_paths!({ with_no_trimmed_paths!({ - if layout.abi.is_uninhabited() { + if layout.is_uninhabited() { // Use this error even for the other intrinsics as it is more precise. 
format!("attempted to instantiate uninhabited type `{ty}`") } else if requirement == ValidityRequirement::Zero { diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index a7d5541481a..15c8e534461 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -55,7 +55,7 @@ impl PlaceValue { /// Creates a `PlaceRef` to this location with the given type. pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> { assert!( - layout.is_unsized() || layout.abi.is_uninhabited() || self.llextra.is_none(), + layout.is_unsized() || layout.is_uninhabited() || self.llextra.is_none(), "Had pointer metadata {:?} for sized type {layout:?}", self.llextra, ); @@ -239,7 +239,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { let dl = &bx.tcx().data_layout; let cast_to_layout = bx.cx().layout_of(cast_to); let cast_to = bx.cx().immediate_backend_type(cast_to_layout); - if self.layout.abi.is_uninhabited() { + if self.layout.is_uninhabited() { return bx.cx().const_poison(cast_to); } let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants { @@ -358,7 +358,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { bx: &mut Bx, variant_index: VariantIdx, ) { - if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { + if self.layout.for_variant(bx.cx(), variant_index).is_uninhabited() { // We play it safe by using a well-defined `abort`, but we could go for immediate UB // if that turns out to be helpful. bx.abort(); diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 82fea4c58e1..6e8c193cd75 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -203,10 +203,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ) -> Option> { // Check for transmutes that are always UB. if operand.layout.size != cast.size - || operand.layout.abi.is_uninhabited() - || cast.abi.is_uninhabited() + || operand.layout.is_uninhabited() + || cast.is_uninhabited() { - if !operand.layout.abi.is_uninhabited() { + if !operand.layout.is_uninhabited() { // Since this is known statically and the input could have existed // without already having hit UB, might as well trap for it. 
bx.abort(); @@ -555,7 +555,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { assert!(bx.cx().is_backend_immediate(cast)); let to_backend_ty = bx.cx().immediate_backend_type(cast); - if operand.layout.abi.is_uninhabited() { + if operand.layout.is_uninhabited() { let val = OperandValue::Immediate(bx.cx().const_poison(to_backend_ty)); return OperandRef { val, layout: cast }; } diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index d54c5b750f0..5f0bc8539ee 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -395,7 +395,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> { #[inline(always)] fn enforce_validity(ecx: &InterpCx<'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool { - ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.abi.is_uninhabited() + ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.is_uninhabited() } fn load_mir( diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs index 81e0b1e12ca..feed0860679 100644 --- a/compiler/rustc_const_eval/src/interpret/discriminant.rs +++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs @@ -27,7 +27,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // discriminant, so we cannot do anything here. // When evaluating we will always error before even getting here, but ConstProp 'executes' // dead code, so we cannot ICE here. - if dest.layout().for_variant(self, variant_index).abi.is_uninhabited() { + if dest.layout().for_variant(self, variant_index).is_uninhabited() { throw_ub!(UninhabitedEnumVariantWritten(variant_index)) } @@ -86,7 +86,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // For consistency with `write_discriminant`, and to make sure that // `project_downcast` cannot fail due to strange layouts, we declare immediate UB // for uninhabited variants. - if op.layout().for_variant(self, index).abi.is_uninhabited() { + if op.layout().for_variant(self, index).is_uninhabited() { throw_ub!(UninhabitedEnumVariantRead(index)) } } @@ -203,7 +203,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { // Reading the discriminant of an uninhabited variant is UB. This is the basis for the // `uninhabited_enum_branching` MIR pass. It also ensures consistency with // `write_discriminant`. - if op.layout().for_variant(self, index).abi.is_uninhabited() { + if op.layout().for_variant(self, index).is_uninhabited() { throw_ub!(UninhabitedEnumVariantRead(index)) } interp_ok(index) diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index 4e603f57c56..6148123bdfe 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -364,7 +364,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let msg = match requirement { // For *all* intrinsics we first check `is_uninhabited` to give a more specific // error message. 
- _ if layout.abi.is_uninhabited() => format!( + _ if layout.is_uninhabited() => format!( "aborted execution: attempted to instantiate uninhabited type `{ty}`" ), ValidityRequirement::Inhabited => bug!("handled earlier"), diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs index b28ac68ac54..380db907481 100644 --- a/compiler/rustc_const_eval/src/interpret/operator.rs +++ b/compiler/rustc_const_eval/src/interpret/operator.rs @@ -315,7 +315,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let ptr = left.to_scalar().to_pointer(self)?; let pointee_ty = left.layout.ty.builtin_deref(true).unwrap(); let pointee_layout = self.layout_of(pointee_ty)?; - assert!(pointee_layout.abi.is_sized()); + assert!(pointee_layout.is_sized()); // The size always fits in `i64` as it can be at most `isize::MAX`. let pointee_size = i64::try_from(pointee_layout.size.bytes()).unwrap(); @@ -518,14 +518,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { interp_ok(match null_op { SizeOf => { - if !layout.abi.is_sized() { + if !layout.is_sized() { span_bug!(self.cur_span(), "unsized type for `NullaryOp::SizeOf`"); } let val = layout.size.bytes(); ImmTy::from_uint(val, usize_layout()) } AlignOf => { - if !layout.abi.is_sized() { + if !layout.is_sized() { span_bug!(self.cur_span(), "unsized type for `NullaryOp::AlignOf`"); } let val = layout.align.abi.bytes(); diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index b6120ce82fe..8b5bb1332e7 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -542,7 +542,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { throw_validation_failure!(self.path, NullPtr { ptr_kind }) } // Do not allow references to uninhabited types. - if place.layout.abi.is_uninhabited() { + if place.layout.is_uninhabited() { let ty = place.layout.ty; throw_validation_failure!(self.path, PtrToUninhabited { ptr_kind, ty }) } @@ -867,7 +867,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { /// Add the entire given place to the "data" range of this visit. fn add_data_range_place(&mut self, place: &PlaceTy<'tcx, M::Provenance>) { // Only sized places can be added this way. - debug_assert!(place.layout.abi.is_sized()); + debug_assert!(place.layout.is_sized()); if let Some(data_bytes) = self.data_bytes.as_mut() { let offset = Self::data_range_offset(self.ecx, place); data_bytes.add_range(offset, place.layout.size); @@ -945,7 +945,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> { layout: TyAndLayout<'tcx>, ) -> Cow<'e, RangeSet> { assert!(layout.ty.is_union()); - assert!(layout.abi.is_sized(), "there are no unsized unions"); + assert!(layout.is_sized(), "there are no unsized unions"); let layout_cx = LayoutCx::new(*ecx.tcx, ecx.param_env); return M::cached_union_data_range(ecx, layout.ty, || { let mut out = RangeSet(Vec::new()); diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs index 649179d5b1e..7a8b976dfc4 100644 --- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs +++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs @@ -29,7 +29,7 @@ pub fn check_validity_requirement<'tcx>( // There is nothing strict or lax about inhabitedness. 
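The refactoring pattern in this commit is a forwarding accessor: `LayoutS::is_uninhabited()` (added in `rustc_abi/src/lib.rs` above) simply delegates to `self.abi.is_uninhabited()`, and every call site then stops reaching through the `abi` field. A minimal sketch of the idea with simplified stand-in types (not the real `rustc_abi` definitions):

    // Illustrative stand-ins for `Abi` and `LayoutS`.
    enum Repr {
        Scalar,
        Uninhabited,
    }

    impl Repr {
        fn is_uninhabited(&self) -> bool {
            matches!(self, Repr::Uninhabited)
        }
    }

    struct Layout {
        repr: Repr,
    }

    impl Layout {
        /// Forwarding accessor: callers no longer need to know which
        /// field uninhabitedness is stored on.
        fn is_uninhabited(&self) -> bool {
            self.repr.is_uninhabited()
        }
    }

    fn main() {
        let layout = Layout { repr: Repr::Uninhabited };
        assert!(layout.is_uninhabited()); // instead of layout.repr.is_uninhabited()
    }

Insulating call sites from the field this way is what makes the later `Abi` to `BackendRepr` rename in this series far less invasive.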
if kind == ValidityRequirement::Inhabited { - return Ok(!layout.abi.is_uninhabited()); + return Ok(!layout.is_uninhabited()); } let layout_cx = LayoutCx::new(tcx, param_env_and_ty.param_env); diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs index 97f3f1c8ef2..d415c858668 100644 --- a/compiler/rustc_hir_analysis/src/check/check.rs +++ b/compiler/rustc_hir_analysis/src/check/check.rs @@ -172,7 +172,7 @@ fn check_static_inhabited(tcx: TyCtxt<'_>, def_id: LocalDefId) { return; } }; - if layout.abi.is_uninhabited() { + if layout.is_uninhabited() { tcx.node_span_lint( UNINHABITED_STATIC, tcx.local_def_id_to_hir_id(def_id), diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 7e65df6b27c..7d05f70b3c6 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -346,7 +346,7 @@ impl<'tcx> SizeSkeleton<'tcx> { // First try computing a static layout. let err = match tcx.layout_of(param_env.and(ty)) { Ok(layout) => { - if layout.abi.is_sized() { + if layout.is_sized() { return Ok(SizeSkeleton::Known(layout.size, Some(layout.align.abi))); } else { // Just to be safe, don't claim a known layout for unsized types. diff --git a/compiler/rustc_mir_transform/Cargo.toml b/compiler/rustc_mir_transform/Cargo.toml index 07ca51a67ae..4b648d21084 100644 --- a/compiler/rustc_mir_transform/Cargo.toml +++ b/compiler/rustc_mir_transform/Cargo.toml @@ -7,6 +7,7 @@ edition = "2021" # tidy-alphabetical-start either = "1" itertools = "0.12" +rustc_abi = { path = "../rustc_abi" } rustc_arena = { path = "../rustc_arena" } rustc_ast = { path = "../rustc_ast" } rustc_attr = { path = "../rustc_attr" } diff --git a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs index 5612e779d6b..3011af4d9d7 100644 --- a/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs +++ b/compiler/rustc_mir_transform/src/unreachable_enum_branching.rs @@ -1,5 +1,6 @@ //! A pass that eliminates branches on uninhabited or unreachable enum variants. +use rustc_abi::Variants; use rustc_data_structures::fx::FxHashSet; use rustc_middle::bug; use rustc_middle::mir::patch::MirPatch; @@ -9,7 +10,6 @@ use rustc_middle::mir::{ }; use rustc_middle::ty::layout::TyAndLayout; use rustc_middle::ty::{Ty, TyCtxt}; -use rustc_target::abi::{Abi, Variants}; use tracing::trace; pub(super) struct UnreachableEnumBranching; @@ -65,7 +65,7 @@ fn variant_discriminants<'tcx>( Variants::Multiple { variants, .. } => variants .iter_enumerated() .filter_map(|(idx, layout)| { - (layout.abi != Abi::Uninhabited) + (!layout.is_uninhabited()) .then(|| ty.discriminant_for_variant(tcx, idx).unwrap().val) }) .collect(), diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs index 17eddbfcd7f..a7ac8169674 100644 --- a/compiler/rustc_transmute/src/layout/tree.rs +++ b/compiler/rustc_transmute/src/layout/tree.rs @@ -339,9 +339,7 @@ pub(crate) mod rustc { // 2. enums that delegate their layout to a variant // 3. enums with multiple variants match layout.variants() { - Variants::Single { .. } - if layout.abi.is_uninhabited() && layout.size == Size::ZERO => - { + Variants::Single { .. } if layout.is_uninhabited() && layout.size == Size::ZERO => { // The layout representation of uninhabited, ZST enums is // defined to be like that of the `!` type, as opposed of a // typical enum. 
Consequently, they cannot be descended into diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs index 6cf114b74c1..3db5a4f1805 100644 --- a/compiler/rustc_ty_utils/src/layout/invariant.rs +++ b/compiler/rustc_ty_utils/src/layout/invariant.rs @@ -10,7 +10,7 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa // Type-level uninhabitedness should always imply ABI uninhabitedness. if layout.ty.is_privately_uninhabited(tcx, cx.param_env) { - assert!(layout.abi.is_uninhabited()); + assert!(layout.is_uninhabited()); } if layout.size.bytes() % layout.align.abi.bytes() != 0 { @@ -262,9 +262,7 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa ) } // Skip empty variants. - if variant.size == Size::ZERO - || variant.fields.count() == 0 - || variant.abi.is_uninhabited() + if variant.size == Size::ZERO || variant.fields.count() == 0 || variant.is_uninhabited() { // These are never actually accessed anyway, so we can skip the coherence check // for them. They also fail that check, since they have -- cgit 1.4.1-3-g733a5 From 17636374de757aab8e1f83ef1bcce12a6a46078b Mon Sep 17 00:00:00 2001 From: klensy Date: Sun, 27 Oct 2024 09:30:46 +0300 Subject: correct LLVMRustCreateThinLTOData arg types --- compiler/rustc_codegen_llvm/src/back/lto.rs | 4 ++-- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 4 ++-- compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp | 8 ++++---- 3 files changed, 8 insertions(+), 8 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 1f7a923dd2c..043357d6c4e 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -502,9 +502,9 @@ fn thin_lto( // upstream... let data = llvm::LLVMRustCreateThinLTOData( thin_modules.as_ptr(), - thin_modules.len() as u32, + thin_modules.len(), symbols_below_threshold.as_ptr(), - symbols_below_threshold.len() as u32, + symbols_below_threshold.len(), ) .ok_or_else(|| write::llvm_err(dcx, LlvmError::PrepareThinLtoContext))?; diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index acc66076833..30e97580844 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -2385,9 +2385,9 @@ unsafe extern "C" { pub fn LLVMRustThinLTOBufferThinLinkDataLen(M: &ThinLTOBuffer) -> size_t; pub fn LLVMRustCreateThinLTOData( Modules: *const ThinLTOModule, - NumModules: c_uint, + NumModules: size_t, PreservedSymbols: *const *const c_char, - PreservedSymbolsLen: c_uint, + PreservedSymbolsLen: size_t, ) -> Option<&'static mut ThinLTOData>; pub fn LLVMRustPrepareThinLTORename( Data: &ThinLTOData, diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index 4b303511dbc..20bf32d731e 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -1251,12 +1251,12 @@ getFirstDefinitionForLinker(const GlobalValueSummaryList &GVSummaryList) { // here is basically the same as before threads are spawned in the `run` // function of `lib/LTO/ThinLTOCodeGenerator.cpp`. 
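The point of this fix is that an `extern "C"` declaration in Rust must mirror the C/C++ prototype exactly: the C++ function takes `size_t`, so declaring the parameters as `size_t` on the Rust side lets callers pass `slice.len()` (a `usize`) directly and removes the lossy `as u32` casts, which would have silently truncated any count above `u32::MAX`. A sketch of the pattern against a hypothetical C function (`sum_lengths` is illustrative, not part of the real wrapper):

    use std::ffi::c_char;

    unsafe extern "C" {
        // Hypothetical import; the Rust types must mirror the C prototype
        // `uint64_t sum_lengths(const char *const *strs, size_t n);`.
        // `usize` is the Rust spelling of `size_t` on supported targets.
        fn sum_lengths(strs: *const *const c_char, n: usize) -> u64;
    }

    fn sum_lengths_safe(ptrs: &[*const c_char]) -> u64 {
        // `len()` is already a `usize`, so no narrowing cast is needed
        // (or possible to get wrong).
        unsafe { sum_lengths(ptrs.as_ptr(), ptrs.len()) }
    }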
extern "C" LLVMRustThinLTOData * -LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules, int num_modules, - const char **preserved_symbols, int num_symbols) { +LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules, size_t num_modules, + const char **preserved_symbols, size_t num_symbols) { auto Ret = std::make_unique(); // Load each module's summary and merge it into one combined index - for (int i = 0; i < num_modules; i++) { + for (size_t i = 0; i < num_modules; i++) { auto module = &modules[i]; auto buffer = StringRef(module->data, module->len); auto mem_buffer = MemoryBufferRef(buffer, module->identifier); @@ -1275,7 +1275,7 @@ LLVMRustCreateThinLTOData(LLVMRustThinLTOModule *modules, int num_modules, // Convert the preserved symbols set from string to GUID, this is then needed // for internalization. - for (int i = 0; i < num_symbols; i++) { + for (size_t i = 0; i < num_symbols; i++) { auto GUID = GlobalValue::getGUID(preserved_symbols[i]); Ret->GUIDPreservedSymbols.insert(GUID); } -- cgit 1.4.1-3-g733a5 From ba81dbf3c6bc4b647f9f1eb0e6da7f318c647a84 Mon Sep 17 00:00:00 2001 From: Zalathar Date: Tue, 29 Oct 2024 15:01:36 +1100 Subject: Don't set unnecessary module flag "LTOPostLink" This module flag was an internal detail of LLVM's optimization passes, and all code involving it was removed in LLVM 17. --- compiler/rustc_codegen_llvm/src/back/lto.rs | 21 +++------------------ compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 2 -- compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp | 5 ----- 3 files changed, 3 insertions(+), 25 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 79527d1beb3..57ec0716cae 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -25,7 +25,6 @@ use tracing::{debug, info}; use crate::back::write::{ self, CodegenDiagnosticsStage, DiagnosticHandlers, bitcode_section_name, save_temp_bitcode, }; -use crate::common::AsCCharPtr; use crate::errors::{ DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro, }; @@ -602,23 +601,9 @@ pub(crate) fn run_pass_manager( // This code is based off the code found in llvm's LTO code generator: // llvm/lib/LTO/LTOCodeGenerator.cpp debug!("running the pass manager"); - unsafe { - if !llvm::LLVMRustHasModuleFlag( - module.module_llvm.llmod(), - "LTOPostLink".as_c_char_ptr(), - 11, - ) { - llvm::LLVMRustAddModuleFlagU32( - module.module_llvm.llmod(), - llvm::LLVMModFlagBehavior::Error, - c"LTOPostLink".as_ptr(), - 1, - ); - } - let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO }; - let opt_level = config.opt_level.unwrap_or(config::OptLevel::No); - write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage)?; - } + let opt_stage = if thin { llvm::OptStage::ThinLTO } else { llvm::OptStage::FatLTO }; + let opt_level = config.opt_level.unwrap_or(config::OptLevel::No); + unsafe { write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage) }?; debug!("lto done"); Ok(()) } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index acc66076833..c204fbe32df 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -1842,8 +1842,6 @@ unsafe extern "C" { value_len: size_t, ); - pub fn LLVMRustHasModuleFlag(M: &Module, name: *const c_char, len: size_t) -> bool; - pub fn 
LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>; pub fn LLVMRustDIBuilderDispose<'a>(Builder: &'a mut DIBuilder<'a>); diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index 9f941637d8c..634e46aaf8b 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -867,11 +867,6 @@ extern "C" void LLVMRustAddModuleFlagString( MDString::get(unwrap(M)->getContext(), StringRef(Value, ValueLen))); } -extern "C" bool LLVMRustHasModuleFlag(LLVMModuleRef M, const char *Name, - size_t Len) { - return unwrap(M)->getModuleFlag(StringRef(Name, Len)) != nullptr; -} - extern "C" void LLVMRustGlobalAddMetadata(LLVMValueRef Global, unsigned Kind, LLVMMetadataRef MD) { unwrap(Global)->addMetadata(Kind, *unwrap(MD)); -- cgit 1.4.1-3-g733a5 From 8d2ed4f0f3f79c402d2ebe585e0904baf4f8d634 Mon Sep 17 00:00:00 2001 From: Zalathar Date: Tue, 29 Oct 2024 13:38:17 +1100 Subject: Clean up FFI calls for setting module flags - Don't rely on enum values defined by LLVM's C++ API - Use safe wrapper functions instead of direct `unsafe` calls - Consistently pass pointer/length strings instead of C strings --- compiler/rustc_codegen_llvm/src/context.rs | 278 ++++++++++------------- compiler/rustc_codegen_llvm/src/debuginfo/mod.rs | 66 +++--- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 24 +- compiler/rustc_codegen_llvm/src/llvm/mod.rs | 29 +++ compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp | 57 ++++- 5 files changed, 242 insertions(+), 212 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 1a2197248b1..f33da42d63e 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -210,133 +210,111 @@ pub(crate) unsafe fn create_module<'ll>( // If skipping the PLT is enabled, we need to add some module metadata // to ensure intrinsic calls don't use it. if !sess.needs_plt() { - let avoid_plt = c"RtLibUseGOT".as_ptr(); - unsafe { - llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Warning, avoid_plt, 1); - } + llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "RtLibUseGOT", 1); } // Enable canonical jump tables if CFI is enabled. (See https://reviews.llvm.org/D65629.) if sess.is_sanitizer_cfi_canonical_jump_tables_enabled() && sess.is_sanitizer_cfi_enabled() { - let canonical_jump_tables = c"CFI Canonical Jump Tables".as_ptr(); - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Override, - canonical_jump_tables, - 1, - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Override, + "CFI Canonical Jump Tables", + 1, + ); } // If we're normalizing integers with CFI, ensure LLVM generated functions do the same. // See https://github.com/llvm/llvm-project/pull/104826 if sess.is_sanitizer_cfi_normalize_integers_enabled() { - let cfi_normalize_integers = c"cfi-normalize-integers".as_ptr(); - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Override, - cfi_normalize_integers, - 1, - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Override, + "cfi-normalize-integers", + 1, + ); } // Enable LTO unit splitting if specified or if CFI is enabled. (See // https://reviews.llvm.org/D53891.) 
if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() { - let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr(); - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Override, - enable_split_lto_unit, - 1, - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Override, + "EnableSplitLTOUnit", + 1, + ); } // Add "kcfi" module flag if KCFI is enabled. (See https://reviews.llvm.org/D119296.) if sess.is_sanitizer_kcfi_enabled() { - let kcfi = c"kcfi".as_ptr(); - unsafe { - llvm::LLVMRustAddModuleFlagU32(llmod, llvm::LLVMModFlagBehavior::Override, kcfi, 1); - } + llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Override, "kcfi", 1); // Add "kcfi-offset" module flag with -Z patchable-function-entry (See // https://reviews.llvm.org/D141172). let pfe = PatchableFunctionEntry::from_config(sess.opts.unstable_opts.patchable_function_entry); if pfe.prefix() > 0 { - let kcfi_offset = c"kcfi-offset".as_ptr(); - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Override, - kcfi_offset, - pfe.prefix().into(), - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Override, + "kcfi-offset", + pfe.prefix().into(), + ); } } // Control Flow Guard is currently only supported by the MSVC linker on Windows. if sess.target.is_like_msvc { - unsafe { - match sess.opts.cg.control_flow_guard { - CFGuard::Disabled => {} - CFGuard::NoChecks => { - // Set `cfguard=1` module flag to emit metadata only. - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Warning, - c"cfguard".as_ptr() as *const _, - 1, - ) - } - CFGuard::Checks => { - // Set `cfguard=2` module flag to emit metadata and checks. - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Warning, - c"cfguard".as_ptr() as *const _, - 2, - ) - } + match sess.opts.cg.control_flow_guard { + CFGuard::Disabled => {} + CFGuard::NoChecks => { + // Set `cfguard=1` module flag to emit metadata only. + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Warning, + "cfguard", + 1, + ); + } + CFGuard::Checks => { + // Set `cfguard=2` module flag to emit metadata and checks. 
+ llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Warning, + "cfguard", + 2, + ); } } } if let Some(BranchProtection { bti, pac_ret }) = sess.opts.unstable_opts.branch_protection { if sess.target.arch == "aarch64" { - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Min, - c"branch-target-enforcement".as_ptr(), - bti.into(), - ); - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Min, - c"sign-return-address".as_ptr(), - pac_ret.is_some().into(), - ); - let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A }); - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Min, - c"sign-return-address-all".as_ptr(), - pac_opts.leaf.into(), - ); - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Min, - c"sign-return-address-with-bkey".as_ptr(), - u32::from(pac_opts.key == PAuthKey::B), - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Min, + "branch-target-enforcement", + bti.into(), + ); + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Min, + "sign-return-address", + pac_ret.is_some().into(), + ); + let pac_opts = pac_ret.unwrap_or(PacRet { leaf: false, key: PAuthKey::A }); + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Min, + "sign-return-address-all", + pac_opts.leaf.into(), + ); + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Min, + "sign-return-address-with-bkey", + u32::from(pac_opts.key == PAuthKey::B), + ); } else { bug!( "branch-protection used on non-AArch64 target; \ @@ -347,59 +325,46 @@ pub(crate) unsafe fn create_module<'ll>( // Pass on the control-flow protection flags to LLVM (equivalent to `-fcf-protection` in Clang). if let CFProtection::Branch | CFProtection::Full = sess.opts.unstable_opts.cf_protection { - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Override, - c"cf-protection-branch".as_ptr(), - 1, - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Override, + "cf-protection-branch", + 1, + ); } if let CFProtection::Return | CFProtection::Full = sess.opts.unstable_opts.cf_protection { - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Override, - c"cf-protection-return".as_ptr(), - 1, - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Override, + "cf-protection-return", + 1, + ); } if sess.opts.unstable_opts.virtual_function_elimination { - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Error, - c"Virtual Function Elim".as_ptr(), - 1, - ); - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Error, + "Virtual Function Elim", + 1, + ); } // Set module flag to enable Windows EHCont Guard (/guard:ehcont). 
if sess.opts.unstable_opts.ehcont_guard { - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Warning, - c"ehcontguard".as_ptr() as *const _, - 1, - ) - } + llvm::add_module_flag_u32(llmod, llvm::ModuleFlagMergeBehavior::Warning, "ehcontguard", 1); } match sess.opts.unstable_opts.function_return { FunctionReturn::Keep => {} - FunctionReturn::ThunkExtern => unsafe { - llvm::LLVMRustAddModuleFlagU32( + FunctionReturn::ThunkExtern => { + llvm::add_module_flag_u32( llmod, - llvm::LLVMModFlagBehavior::Override, - c"function_return_thunk_extern".as_ptr(), + llvm::ModuleFlagMergeBehavior::Override, + "function_return_thunk_extern", 1, - ) - }, + ); + } } match (sess.opts.unstable_opts.small_data_threshold, sess.target.small_data_threshold_support()) @@ -407,15 +372,12 @@ pub(crate) unsafe fn create_module<'ll>( // Set up the small-data optimization limit for architectures that use // an LLVM module flag to control this. (Some(threshold), SmallDataThresholdSupport::LlvmModuleFlag(flag)) => { - let flag = SmallCStr::new(flag.as_ref()); - unsafe { - llvm::LLVMRustAddModuleFlagU32( - llmod, - llvm::LLVMModFlagBehavior::Error, - flag.as_c_str().as_ptr(), - threshold as u32, - ) - } + llvm::add_module_flag_u32( + llmod, + llvm::ModuleFlagMergeBehavior::Error, + &flag, + threshold as u32, + ); } _ => (), }; @@ -449,33 +411,29 @@ pub(crate) unsafe fn create_module<'ll>( // If llvm_abiname is empty, emit nothing. let llvm_abiname = &sess.target.options.llvm_abiname; if matches!(sess.target.arch.as_ref(), "riscv32" | "riscv64") && !llvm_abiname.is_empty() { - unsafe { - llvm::LLVMRustAddModuleFlagString( - llmod, - llvm::LLVMModFlagBehavior::Error, - c"target-abi".as_ptr(), - llvm_abiname.as_c_char_ptr(), - llvm_abiname.len(), - ); - } + llvm::add_module_flag_str( + llmod, + llvm::ModuleFlagMergeBehavior::Error, + "target-abi", + llvm_abiname, + ); } // Add module flags specified via -Z llvm_module_flag - for (key, value, behavior) in &sess.opts.unstable_opts.llvm_module_flag { - let key = format!("{key}\0"); - let behavior = match behavior.as_str() { - "error" => llvm::LLVMModFlagBehavior::Error, - "warning" => llvm::LLVMModFlagBehavior::Warning, - "require" => llvm::LLVMModFlagBehavior::Require, - "override" => llvm::LLVMModFlagBehavior::Override, - "append" => llvm::LLVMModFlagBehavior::Append, - "appendunique" => llvm::LLVMModFlagBehavior::AppendUnique, - "max" => llvm::LLVMModFlagBehavior::Max, - "min" => llvm::LLVMModFlagBehavior::Min, + for (key, value, merge_behavior) in &sess.opts.unstable_opts.llvm_module_flag { + let merge_behavior = match merge_behavior.as_str() { + "error" => llvm::ModuleFlagMergeBehavior::Error, + "warning" => llvm::ModuleFlagMergeBehavior::Warning, + "require" => llvm::ModuleFlagMergeBehavior::Require, + "override" => llvm::ModuleFlagMergeBehavior::Override, + "append" => llvm::ModuleFlagMergeBehavior::Append, + "appendunique" => llvm::ModuleFlagMergeBehavior::AppendUnique, + "max" => llvm::ModuleFlagMergeBehavior::Max, + "min" => llvm::ModuleFlagMergeBehavior::Min, // We already checked this during option parsing _ => unreachable!(), }; - unsafe { llvm::LLVMRustAddModuleFlagU32(llmod, behavior, key.as_c_char_ptr(), *value) } + llvm::add_module_flag_u32(llmod, merge_behavior, key, *value); } llmod diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs index 85eb1c8509d..72e723aa849 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs +++ 
b/compiler/rustc_codegen_llvm/src/debuginfo/mod.rs @@ -91,45 +91,39 @@ impl<'ll, 'tcx> CodegenUnitDebugContext<'ll, 'tcx> { } pub(crate) fn finalize(&self, sess: &Session) { - unsafe { - llvm::LLVMRustDIBuilderFinalize(self.builder); - - if !sess.target.is_like_msvc { - // Debuginfo generation in LLVM by default uses a higher - // version of dwarf than macOS currently understands. We can - // instruct LLVM to emit an older version of dwarf, however, - // for macOS to understand. For more info see #11352 - // This can be overridden using --llvm-opts -dwarf-version,N. - // Android has the same issue (#22398) - let dwarf_version = sess - .opts - .unstable_opts - .dwarf_version - .unwrap_or(sess.target.default_dwarf_version); - llvm::LLVMRustAddModuleFlagU32( - self.llmod, - llvm::LLVMModFlagBehavior::Warning, - c"Dwarf Version".as_ptr(), - dwarf_version, - ); - } else { - // Indicate that we want CodeView debug information on MSVC - llvm::LLVMRustAddModuleFlagU32( - self.llmod, - llvm::LLVMModFlagBehavior::Warning, - c"CodeView".as_ptr(), - 1, - ) - } - - // Prevent bitcode readers from deleting the debug info. - llvm::LLVMRustAddModuleFlagU32( + unsafe { llvm::LLVMRustDIBuilderFinalize(self.builder) }; + if !sess.target.is_like_msvc { + // Debuginfo generation in LLVM by default uses a higher + // version of dwarf than macOS currently understands. We can + // instruct LLVM to emit an older version of dwarf, however, + // for macOS to understand. For more info see #11352 + // This can be overridden using --llvm-opts -dwarf-version,N. + // Android has the same issue (#22398) + let dwarf_version = + sess.opts.unstable_opts.dwarf_version.unwrap_or(sess.target.default_dwarf_version); + llvm::add_module_flag_u32( self.llmod, - llvm::LLVMModFlagBehavior::Warning, - c"Debug Info Version".as_ptr(), - llvm::LLVMRustDebugMetadataVersion(), + llvm::ModuleFlagMergeBehavior::Warning, + "Dwarf Version", + dwarf_version, + ); + } else { + // Indicate that we want CodeView debug information on MSVC + llvm::add_module_flag_u32( + self.llmod, + llvm::ModuleFlagMergeBehavior::Warning, + "CodeView", + 1, ); } + + // Prevent bitcode readers from deleting the debug info. + llvm::add_module_flag_u32( + self.llmod, + llvm::ModuleFlagMergeBehavior::Warning, + "Debug Info Version", + unsafe { llvm::LLVMRustDebugMetadataVersion() }, + ); } } diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index c204fbe32df..fffbae19c9c 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -85,7 +85,7 @@ pub enum LLVMMachineType { ARM = 0x01c0, } -/// LLVM's Module::ModFlagBehavior, defined in llvm/include/llvm/IR/Module.h. +/// Must match the layout of `LLVMRustModuleFlagMergeBehavior`. /// /// When merging modules (e.g. during LTO), their metadata flags are combined. Conflicts are /// resolved according to the merge behaviors specified here. Flags differing only in merge @@ -93,9 +93,13 @@ pub enum LLVMMachineType { /// /// In order for Rust-C LTO to work, we must specify behaviors compatible with Clang. Notably, /// 'Error' and 'Warning' cannot be mixed for a given flag. +/// +/// There is a stable LLVM-C version of this enum (`LLVMModuleFlagBehavior`), +/// but as of LLVM 19 it does not support all of the enum values in the unstable +/// C++ API. 
#[derive(Copy, Clone, PartialEq)] #[repr(C)] -pub enum LLVMModFlagBehavior { +pub enum ModuleFlagMergeBehavior { Error = 1, Warning = 2, Require = 3, @@ -1829,17 +1833,19 @@ unsafe extern "C" { /// "compatible" means depends on the merge behaviors involved. pub fn LLVMRustAddModuleFlagU32( M: &Module, - merge_behavior: LLVMModFlagBehavior, - name: *const c_char, - value: u32, + MergeBehavior: ModuleFlagMergeBehavior, + Name: *const c_char, + NameLen: size_t, + Value: u32, ); pub fn LLVMRustAddModuleFlagString( M: &Module, - merge_behavior: LLVMModFlagBehavior, - name: *const c_char, - value: *const c_char, - value_len: size_t, + MergeBehavior: ModuleFlagMergeBehavior, + Name: *const c_char, + NameLen: size_t, + Value: *const c_char, + ValueLen: size_t, ); pub fn LLVMRustDIBuilderCreate(M: &Module) -> &mut DIBuilder<'_>; diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index cabe6c031d3..acd425bbb8e 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -352,3 +352,32 @@ impl Drop for OperandBundleDef<'_> { } } } + +pub(crate) fn add_module_flag_u32( + module: &Module, + merge_behavior: ModuleFlagMergeBehavior, + key: &str, + value: u32, +) { + unsafe { + LLVMRustAddModuleFlagU32(module, merge_behavior, key.as_c_char_ptr(), key.len(), value); + } +} + +pub(crate) fn add_module_flag_str( + module: &Module, + merge_behavior: ModuleFlagMergeBehavior, + key: &str, + value: &str, +) { + unsafe { + LLVMRustAddModuleFlagString( + module, + merge_behavior, + key.as_c_char_ptr(), + key.len(), + value.as_c_char_ptr(), + value.len(), + ); + } +} diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index 634e46aaf8b..690f808c5f1 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -853,17 +853,60 @@ extern "C" uint32_t LLVMRustVersionMinor() { return LLVM_VERSION_MINOR; } extern "C" uint32_t LLVMRustVersionMajor() { return LLVM_VERSION_MAJOR; } -extern "C" void LLVMRustAddModuleFlagU32(LLVMModuleRef M, - Module::ModFlagBehavior MergeBehavior, - const char *Name, uint32_t Value) { - unwrap(M)->addModuleFlag(MergeBehavior, Name, Value); +// FFI equivalent of LLVM's `llvm::Module::ModFlagBehavior`. +// Must match the layout of +// `rustc_codegen_llvm::llvm::ffi::ModuleFlagMergeBehavior`. +// +// There is a stable LLVM-C version of this enum (`LLVMModuleFlagBehavior`), +// but as of LLVM 19 it does not support all of the enum values in the unstable +// C++ API. 
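The two helpers just added to `llvm/mod.rs` are the safe-wrapper pattern this commit is about: the `unsafe` FFI call and the `&str`-to-pointer/length conversion happen in exactly one audited place, and every call site becomes a plain safe function call taking `&str`. A sketch of the same shape against a hypothetical import (`DemoAddModuleFlagU32` is illustrative; rustc's real code converts via its `AsCCharPtr` helper rather than a raw `.cast()`):

    use std::ffi::c_char;

    unsafe extern "C" {
        // Hypothetical FFI declaration shaped like the ones above: the string
        // crosses the boundary as an explicit (pointer, length) pair instead
        // of a NUL-terminated C string.
        fn DemoAddModuleFlagU32(name: *const c_char, name_len: usize, value: u32);
    }

    /// Safe wrapper: any `&str` works, with no `CString` allocation and no
    /// interior-NUL failure mode, because the length travels with the pointer.
    pub fn add_flag_u32(name: &str, value: u32) {
        unsafe { DemoAddModuleFlagU32(name.as_ptr().cast(), name.len(), value) }
    }

This is also why the commit drops the `c"..."` literals and the old trailing-`\0` formatting: with pointer-plus-length strings there is no NUL terminator to maintain.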
+enum class LLVMRustModuleFlagMergeBehavior { + Error = 1, + Warning = 2, + Require = 3, + Override = 4, + Append = 5, + AppendUnique = 6, + Max = 7, + Min = 8, +}; + +static Module::ModFlagBehavior +fromRust(LLVMRustModuleFlagMergeBehavior Behavior) { + switch (Behavior) { + case LLVMRustModuleFlagMergeBehavior::Error: + return Module::ModFlagBehavior::Error; + case LLVMRustModuleFlagMergeBehavior::Warning: + return Module::ModFlagBehavior::Warning; + case LLVMRustModuleFlagMergeBehavior::Require: + return Module::ModFlagBehavior::Require; + case LLVMRustModuleFlagMergeBehavior::Override: + return Module::ModFlagBehavior::Override; + case LLVMRustModuleFlagMergeBehavior::Append: + return Module::ModFlagBehavior::Append; + case LLVMRustModuleFlagMergeBehavior::AppendUnique: + return Module::ModFlagBehavior::AppendUnique; + case LLVMRustModuleFlagMergeBehavior::Max: + return Module::ModFlagBehavior::Max; + case LLVMRustModuleFlagMergeBehavior::Min: + return Module::ModFlagBehavior::Min; + } + report_fatal_error("bad LLVMRustModuleFlagMergeBehavior"); +} + +extern "C" void +LLVMRustAddModuleFlagU32(LLVMModuleRef M, + LLVMRustModuleFlagMergeBehavior MergeBehavior, + const char *Name, size_t NameLen, uint32_t Value) { + unwrap(M)->addModuleFlag(fromRust(MergeBehavior), StringRef(Name, NameLen), + Value); } extern "C" void LLVMRustAddModuleFlagString( - LLVMModuleRef M, Module::ModFlagBehavior MergeBehavior, const char *Name, - const char *Value, size_t ValueLen) { + LLVMModuleRef M, LLVMRustModuleFlagMergeBehavior MergeBehavior, + const char *Name, size_t NameLen, const char *Value, size_t ValueLen) { unwrap(M)->addModuleFlag( - MergeBehavior, Name, + fromRust(MergeBehavior), StringRef(Name, NameLen), MDString::get(unwrap(M)->getContext(), StringRef(Value, ValueLen))); } -- cgit 1.4.1-3-g733a5 From 7086dd83cca1cf694c7bd171efbf262fa8ffb4aa Mon Sep 17 00:00:00 2001 From: Jubilee Young Date: Tue, 29 Oct 2024 13:37:26 -0700 Subject: compiler: `rustc_abi::Abi` => `BackendRepr` The initial naming of "Abi" was an awful mistake, conveying wrong ideas about how psABIs worked and even more about what the enum meant. It was only meant to represent the way the value would be described to a codegen backend as it was lowered to that intermediate representation. It was never meant to mean anything about the actual psABI handling! The conflation is because LLVM typically will associate a certain form with a certain ABI, but even that does not hold when the special cases that actually exist arise, plus the IR annotations that modify the ABI. Reframe `rustc_abi::Abi` as the `BackendRepr` of the type, and rename `BackendRepr::Aggregate` as `BackendRepr::Memory`. Unfortunately, due to the persistent misunderstandings, this too is now incorrect: - Scattered ABI-relevant code is entangled with BackendRepr - We do not always pre-compute a correct BackendRepr that reflects how we "actually" want this value to be handled, so we leave the backend interface to also inject various special-cases here - In some cases `BackendRepr::Memory` is a "real" aggregate, but in others it is in fact using memory, and in some cases it is a scalar! Our rustc-to-backend lowering code handles this sort of thing right now. That will eventually be addressed by lifting duplicated lowering code to either rustc_codegen_ssa or rustc_target as appropriate. 
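Since `BackendRepr` is, per the message above, only a description of how a value is presented to the codegen backend, the mechanical part of the diff below is renaming the type, its `Aggregate` variant (now `Memory`), and the `layout.abi` field (now `layout.backend_repr`). A simplified sketch of the consumer-side shape (variant payloads omitted; these are not the real `rustc_abi` definitions):

    // Illustrative, payload-free stand-in for the renamed enum.
    enum BackendRepr {
        Scalar,
        ScalarPair,
        Vector,
        Memory { sized: bool },
        Uninhabited,
    }

    fn describe(repr: &BackendRepr) -> &'static str {
        // Call sites match on the representation handed to the backend;
        // nothing here is about the psABI or calling convention.
        match repr {
            BackendRepr::Scalar => "a single SSA value",
            BackendRepr::ScalarPair => "two SSA values",
            BackendRepr::Vector => "a SIMD vector value",
            BackendRepr::Memory { sized: true } => "an aggregate kept in memory",
            BackendRepr::Memory { sized: false } => "unsized, reached through a pointer",
            BackendRepr::Uninhabited => "no value can exist at runtime",
        }
    }

    fn main() {
        println!("{}", describe(&BackendRepr::Memory { sized: true }));
    }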
--- compiler/rustc_abi/src/callconv.rs | 16 +-- compiler/rustc_abi/src/layout.rs | 104 +++++++++---------- compiler/rustc_abi/src/layout/ty.rs | 12 +-- compiler/rustc_abi/src/lib.rs | 112 ++++++++++++--------- compiler/rustc_codegen_llvm/src/abi.rs | 10 +- compiler/rustc_codegen_llvm/src/asm.rs | 93 +++++++++-------- compiler/rustc_codegen_llvm/src/builder.rs | 4 +- compiler/rustc_codegen_llvm/src/intrinsic.rs | 13 +-- compiler/rustc_codegen_llvm/src/type_of.rs | 37 ++++--- compiler/rustc_codegen_ssa/src/mir/block.rs | 2 +- compiler/rustc_codegen_ssa/src/mir/constant.rs | 4 +- compiler/rustc_codegen_ssa/src/mir/debuginfo.rs | 4 +- compiler/rustc_codegen_ssa/src/mir/operand.rs | 39 +++---- compiler/rustc_codegen_ssa/src/mir/rvalue.rs | 10 +- compiler/rustc_codegen_ssa/src/traits/builder.rs | 4 +- .../src/const_eval/dummy_machine.rs | 2 +- .../src/const_eval/eval_queries.rs | 6 +- .../rustc_const_eval/src/const_eval/valtrees.rs | 6 +- compiler/rustc_const_eval/src/interpret/call.rs | 4 +- compiler/rustc_const_eval/src/interpret/cast.rs | 2 +- .../rustc_const_eval/src/interpret/discriminant.rs | 2 +- .../rustc_const_eval/src/interpret/intrinsics.rs | 2 +- compiler/rustc_const_eval/src/interpret/operand.rs | 53 ++++++---- .../rustc_const_eval/src/interpret/operator.rs | 12 +-- compiler/rustc_const_eval/src/interpret/place.rs | 12 ++- .../rustc_const_eval/src/interpret/validity.rs | 23 +++-- .../src/util/check_validity_requirement.rs | 14 +-- compiler/rustc_lint/src/builtin.rs | 6 +- compiler/rustc_lint/src/foreign_modules.rs | 2 +- compiler/rustc_lint/src/types.rs | 6 +- compiler/rustc_middle/src/ty/layout.rs | 7 +- .../rustc_mir_build/src/build/expr/as_rvalue.rs | 4 +- compiler/rustc_mir_dataflow/src/value_analysis.rs | 2 +- .../rustc_mir_transform/src/dataflow_const_prop.rs | 12 +-- compiler/rustc_mir_transform/src/gvn.rs | 19 ++-- .../rustc_mir_transform/src/known_panics_lint.rs | 10 +- compiler/rustc_passes/src/layout_test.rs | 6 +- compiler/rustc_smir/src/rustc_smir/convert/abi.rs | 14 +-- compiler/rustc_target/src/callconv/loongarch.rs | 18 ++-- compiler/rustc_target/src/callconv/mips64.rs | 8 +- compiler/rustc_target/src/callconv/mod.rs | 29 +++--- compiler/rustc_target/src/callconv/riscv.rs | 18 ++-- compiler/rustc_target/src/callconv/sparc64.rs | 8 +- compiler/rustc_target/src/callconv/x86.rs | 18 ++-- compiler/rustc_target/src/callconv/x86_64.rs | 14 +-- compiler/rustc_target/src/callconv/x86_win64.rs | 27 ++--- compiler/rustc_target/src/callconv/xtensa.rs | 6 +- .../src/traits/dyn_compatibility.rs | 10 +- compiler/rustc_ty_utils/src/abi.rs | 14 +-- compiler/rustc_ty_utils/src/layout.rs | 43 ++++---- compiler/rustc_ty_utils/src/layout/invariant.rs | 42 ++++---- 51 files changed, 517 insertions(+), 428 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs index 872cae59a4e..ee63e46e88c 100644 --- a/compiler/rustc_abi/src/callconv.rs +++ b/compiler/rustc_abi/src/callconv.rs @@ -6,9 +6,9 @@ mod abi { #[cfg(feature = "nightly")] use rustc_macros::HashStable_Generic; -#[cfg(feature = "nightly")] -use crate::{Abi, FieldsShape, TyAbiInterface, TyAndLayout}; use crate::{Align, HasDataLayout, Size}; +#[cfg(feature = "nightly")] +use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout}; #[cfg_attr(feature = "nightly", derive(HashStable_Generic))] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] @@ -128,11 +128,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { where Ty: TyAbiInterface<'a, C> + Copy, { 
- match self.abi { - Abi::Uninhabited => Err(Heterogeneous), + match self.backend_repr { + BackendRepr::Uninhabited => Err(Heterogeneous), // The primitive for this algorithm. - Abi::Scalar(scalar) => { + BackendRepr::Scalar(scalar) => { let kind = match scalar.primitive() { abi::Int(..) | abi::Pointer(_) => RegKind::Integer, abi::Float(_) => RegKind::Float, @@ -140,7 +140,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size })) } - Abi::Vector { .. } => { + BackendRepr::Vector { .. } => { assert!(!self.is_zst()); Ok(HomogeneousAggregate::Homogeneous(Reg { kind: RegKind::Vector, @@ -148,7 +148,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { })) } - Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => { + BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => { // Helper for computing `homogeneous_aggregate`, allowing a custom // starting offset (used below for handling variants). let from_fields_at = @@ -246,7 +246,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ok(result) } } - Abi::Aggregate { sized: false } => Err(Heterogeneous), + BackendRepr::Memory { sized: false } => Err(Heterogeneous), } } } diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 86de39b8f97..e6d66f608da 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -6,7 +6,7 @@ use rustc_index::Idx; use tracing::debug; use crate::{ - Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer, + AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer, LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange, }; @@ -125,7 +125,7 @@ impl LayoutCalculator { offsets: [Size::ZERO, b_offset].into(), memory_index: [0, 1].into(), }, - abi: Abi::ScalarPair(a, b), + backend_repr: BackendRepr::ScalarPair(a, b), largest_niche, align, size, @@ -216,7 +216,7 @@ impl LayoutCalculator { LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Primitive, - abi: Abi::Uninhabited, + backend_repr: BackendRepr::Uninhabited, largest_niche: None, align: dl.i8_align, size: Size::ZERO, @@ -331,7 +331,7 @@ impl LayoutCalculator { if let Ok(common) = common_non_zst_abi_and_align { // Discard valid range information and allow undef - let field_abi = field.abi.to_union(); + let field_abi = field.backend_repr.to_union(); if let Some((common_abi, common_align)) = common { if common_abi != field_abi { @@ -340,7 +340,7 @@ impl LayoutCalculator { } else { // Fields with the same non-Aggregate ABI should also // have the same alignment - if !matches!(common_abi, Abi::Aggregate { .. }) { + if !matches!(common_abi, BackendRepr::Memory { .. }) { assert_eq!( common_align, field.align.abi, "non-Aggregate field with matching ABI but differing alignment" @@ -369,11 +369,11 @@ impl LayoutCalculator { // If all non-ZST fields have the same ABI, we may forward that ABI // for the union as a whole, unless otherwise inhibited. let abi = match common_non_zst_abi_and_align { - Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true }, + Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true }, Ok(Some((abi, _))) => { if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) { // Mismatched alignment (e.g. 
union is #[repr(packed)]): disable opt - Abi::Aggregate { sized: true } + BackendRepr::Memory { sized: true } } else { abi } @@ -387,7 +387,7 @@ impl LayoutCalculator { Ok(LayoutData { variants: Variants::Single { index: only_variant_idx }, fields: FieldsShape::Union(union_field_count), - abi, + backend_repr: abi, largest_niche: None, align, size: size.align_to(align.abi), @@ -434,23 +434,23 @@ impl LayoutCalculator { // Already doesn't have any niches Scalar::Union { .. } => {} }; - match &mut st.abi { - Abi::Uninhabited => {} - Abi::Scalar(scalar) => hide_niches(scalar), - Abi::ScalarPair(a, b) => { + match &mut st.backend_repr { + BackendRepr::Uninhabited => {} + BackendRepr::Scalar(scalar) => hide_niches(scalar), + BackendRepr::ScalarPair(a, b) => { hide_niches(a); hide_niches(b); } - Abi::Vector { element, count: _ } => hide_niches(element), - Abi::Aggregate { sized: _ } => {} + BackendRepr::Vector { element, count: _ } => hide_niches(element), + BackendRepr::Memory { sized: _ } => {} } st.largest_niche = None; return Ok(st); } let (start, end) = scalar_valid_range; - match st.abi { - Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { + match st.backend_repr { + BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => { // Enlarging validity ranges would result in missed // optimizations, *not* wrongly assuming the inner // value is valid. e.g. unions already enlarge validity ranges, @@ -607,8 +607,8 @@ impl LayoutCalculator { } // It can't be a Scalar or ScalarPair because the offset isn't 0. - if !layout.abi.is_uninhabited() { - layout.abi = Abi::Aggregate { sized: true }; + if !layout.is_uninhabited() { + layout.backend_repr = BackendRepr::Memory { sized: true }; } layout.size += this_offset; @@ -627,26 +627,26 @@ impl LayoutCalculator { let same_size = size == variant_layouts[largest_variant_index].size; let same_align = align == variant_layouts[largest_variant_index].align; - let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) { - Abi::Uninhabited + let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) { + BackendRepr::Uninhabited } else if same_size && same_align && others_zst { - match variant_layouts[largest_variant_index].abi { + match variant_layouts[largest_variant_index].backend_repr { // When the total alignment and size match, we can use the // same ABI as the scalar variant with the reserved niche. - Abi::Scalar(_) => Abi::Scalar(niche_scalar), - Abi::ScalarPair(first, second) => { + BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar), + BackendRepr::ScalarPair(first, second) => { // Only the niche is guaranteed to be initialised, // so use union layouts for the other primitive. 
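// Illustration (editorial, not part of this patch): if the niche scalar sits
// at offset 0, as with the null-pointer niche of `Option<(&u8, u16)>`, the
// pair keeps the niche first and demotes the other scalar to a union; when
// the niche sits in the second slot, the roles below are swapped.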
if niche_offset == Size::ZERO { - Abi::ScalarPair(niche_scalar, second.to_union()) + BackendRepr::ScalarPair(niche_scalar, second.to_union()) } else { - Abi::ScalarPair(first.to_union(), niche_scalar) + BackendRepr::ScalarPair(first.to_union(), niche_scalar) } } - _ => Abi::Aggregate { sized: true }, + _ => BackendRepr::Memory { sized: true }, } } else { - Abi::Aggregate { sized: true } + BackendRepr::Memory { sized: true } }; let layout = LayoutData { @@ -664,7 +664,7 @@ impl LayoutCalculator { offsets: [niche_offset].into(), memory_index: [0].into(), }, - abi, + backend_repr: abi, largest_niche, size, align, @@ -833,14 +833,14 @@ impl LayoutCalculator { end: (max as u128 & tag_mask), }, }; - let mut abi = Abi::Aggregate { sized: true }; + let mut abi = BackendRepr::Memory { sized: true }; - if layout_variants.iter().all(|v| v.abi.is_uninhabited()) { - abi = Abi::Uninhabited; + if layout_variants.iter().all(|v| v.is_uninhabited()) { + abi = BackendRepr::Uninhabited; } else if tag.size(dl) == size { // Make sure we only use scalar layout when the enum is entirely its // own tag (i.e. it has no padding nor any non-ZST variant fields). - abi = Abi::Scalar(tag); + abi = BackendRepr::Scalar(tag); } else { // Try to use a ScalarPair for all tagged enums. // That's possible only if we can find a common primitive type for all variants. @@ -864,8 +864,8 @@ impl LayoutCalculator { break; } }; - let prim = match field.abi { - Abi::Scalar(scalar) => { + let prim = match field.backend_repr { + BackendRepr::Scalar(scalar) => { common_prim_initialized_in_all_variants &= matches!(scalar, Scalar::Initialized { .. }); scalar.primitive() @@ -934,7 +934,7 @@ impl LayoutCalculator { { // We can use `ScalarPair` only when it matches our // already computed layout (including `#[repr(C)]`). - abi = pair.abi; + abi = pair.backend_repr; } } } @@ -942,12 +942,14 @@ impl LayoutCalculator { // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the // variants to ensure they are consistent. This is because a downcast is // semantically a NOP, and thus should not affect layout. - if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { + if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) { for variant in &mut layout_variants { // We only do this for variants with fields; the others are not accessed anyway. // Also do not overwrite any already existing "clever" ABIs. - if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) { - variant.abi = abi; + if variant.fields.count() > 0 + && matches!(variant.backend_repr, BackendRepr::Memory { .. }) + { + variant.backend_repr = abi; // Also need to bump up the size and alignment, so that the entire value fits // in here. variant.size = cmp::max(variant.size, size); @@ -970,7 +972,7 @@ impl LayoutCalculator { memory_index: [0].into(), }, largest_niche, - abi, + backend_repr: abi, align, size, max_repr_align, @@ -1252,7 +1254,7 @@ impl LayoutCalculator { } let mut layout_of_single_non_zst_field = None; let sized = unsized_field.is_none(); - let mut abi = Abi::Aggregate { sized }; + let mut abi = BackendRepr::Memory { sized }; let optimize_abi = !repr.inhibit_newtype_abi_optimization(); @@ -1270,16 +1272,16 @@ impl LayoutCalculator { // Field fills the struct and it has a scalar or scalar pair ABI. 
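// Illustration (editorial, not part of this patch): `struct Wrapper(f64)` has
// one non-ZST field at offset 0 whose size and alignment match the struct, so
// the field's `Scalar` repr is forwarded to the wrapper; per the comment
// above, the `optimize_abi` guard skips that forwarding for `#[repr(C)]`
// newtypes, since unpacking them would change the C ABI.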
if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size { - match field.abi { + match field.backend_repr { // For plain scalars, or vectors of them, we can't unpack // newtypes for `#[repr(C)]`, as that affects C ABIs. - Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => { - abi = field.abi; + BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => { + abi = field.backend_repr; } // But scalar pairs are Rust-specific and get // treated as aggregates by C ABIs anyway. - Abi::ScalarPair(..) => { - abi = field.abi; + BackendRepr::ScalarPair(..) => { + abi = field.backend_repr; } _ => {} } @@ -1288,8 +1290,8 @@ impl LayoutCalculator { // Two non-ZST fields, and they're both scalars. (Some((i, a)), Some((j, b)), None) => { - match (a.abi, b.abi) { - (Abi::Scalar(a), Abi::Scalar(b)) => { + match (a.backend_repr, b.backend_repr) { + (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => { // Order by the memory placement, not source order. let ((i, a), (j, b)) = if offsets[i] < offsets[j] { ((i, a), (j, b)) } else { @@ -1315,7 +1317,7 @@ impl LayoutCalculator { { // We can use `ScalarPair` only when it matches our // already computed layout (including `#[repr(C)]`). - abi = pair.abi; + abi = pair.backend_repr; } } _ => {} @@ -1325,8 +1327,8 @@ impl LayoutCalculator { _ => {} } } - if fields.iter().any(|f| f.abi.is_uninhabited()) { - abi = Abi::Uninhabited; + if fields.iter().any(|f| f.is_uninhabited()) { + abi = BackendRepr::Uninhabited; } let unadjusted_abi_align = if repr.transparent() { @@ -1344,7 +1346,7 @@ impl LayoutCalculator { Ok(LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Arbitrary { offsets, memory_index }, - abi, + backend_repr: abi, largest_niche, align, size, diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs index e029e1426b2..062447ea03f 100644 --- a/compiler/rustc_abi/src/layout/ty.rs +++ b/compiler/rustc_abi/src/layout/ty.rs @@ -83,8 +83,8 @@ impl<'a> Layout<'a> { &self.0.0.variants } - pub fn abi(self) -> Abi { - self.0.0.abi + pub fn backend_repr(self) -> BackendRepr { + self.0.0.backend_repr } pub fn largest_niche(self) -> Option<Niche> { @@ -114,7 +114,7 @@ impl<'a> Layout<'a> { pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool { self.size() == data_layout.pointer_size && self.align().abi == data_layout.pointer_align.abi - && matches!(self.abi(), Abi::Scalar(Scalar::Initialized { .. })) + && matches!(self.backend_repr(), BackendRepr::Scalar(Scalar::Initialized { .. })) } } @@ -196,9 +196,9 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ty: TyAbiInterface<'a, C>, C: HasDataLayout, { - match self.abi { - Abi::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)), - Abi::Aggregate { .. } => { + match self.backend_repr { + BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)), + BackendRepr::Memory { .. } => { if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 { self.field(cx, 0).is_single_fp_element(cx) } else { diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 41922aee648..fac1122c4df 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -1344,11 +1344,19 @@ impl AddressSpace { pub const DATA: Self = AddressSpace(0); } -/// Describes how values of the type are passed by target ABIs, -/// in terms of categories of C types there are ABI rules for.
+/// The way we represent values to the backend +/// +/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI. +/// In reality, this implies little about that, but is mostly used to describe the syntactic form +/// emitted for the backend, as most backends handle SSA values and blobs of memory differently. +/// The psABI may need consideration in doing so, but this enum does not constitute a promise for +/// how the value will be lowered to the calling convention, in itself. +/// +/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector, +/// and larger values will usually prefer to be represented as memory. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[cfg_attr(feature = "nightly", derive(HashStable_Generic))] -pub enum Abi { +pub enum BackendRepr { Uninhabited, Scalar(Scalar), ScalarPair(Scalar, Scalar), @@ -1356,19 +1364,23 @@ pub enum Abi { element: Scalar, count: u64, }, - Aggregate { + // FIXME: I sometimes use memory, sometimes use an IR aggregate! + Memory { /// If true, the size is exact, otherwise it's only a lower bound. sized: bool, }, } -impl Abi { +impl BackendRepr { /// Returns `true` if the layout corresponds to an unsized type. #[inline] pub fn is_unsized(&self) -> bool { match *self { - Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false, - Abi::Aggregate { sized } => !sized, + BackendRepr::Uninhabited + | BackendRepr::Scalar(_) + | BackendRepr::ScalarPair(..) + | BackendRepr::Vector { .. } => false, + BackendRepr::Memory { sized } => !sized, } } @@ -1381,7 +1393,7 @@ impl Abi { #[inline] pub fn is_signed(&self) -> bool { match self { - Abi::Scalar(scal) => match scal.primitive() { + BackendRepr::Scalar(scal) => match scal.primitive() { Primitive::Int(_, signed) => signed, _ => false, }, @@ -1392,61 +1404,67 @@ impl Abi { /// Returns `true` if this is an uninhabited type #[inline] pub fn is_uninhabited(&self) -> bool { - matches!(*self, Abi::Uninhabited) + matches!(*self, BackendRepr::Uninhabited) } /// Returns `true` if this is a scalar type #[inline] pub fn is_scalar(&self) -> bool { - matches!(*self, Abi::Scalar(_)) + matches!(*self, BackendRepr::Scalar(_)) } /// Returns `true` if this is a bool #[inline] pub fn is_bool(&self) -> bool { - matches!(*self, Abi::Scalar(s) if s.is_bool()) + matches!(*self, BackendRepr::Scalar(s) if s.is_bool()) } /// Returns the fixed alignment of this ABI, if any is mandated. pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> { Some(match *self { - Abi::Scalar(s) => s.align(cx), - Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)), - Abi::Vector { element, count } => { + BackendRepr::Scalar(s) => s.align(cx), + BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)), + BackendRepr::Vector { element, count } => { cx.data_layout().vector_align(element.size(cx) * count) } - Abi::Uninhabited | Abi::Aggregate { .. } => return None, + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None, }) } /// Returns the fixed size of this ABI, if any is mandated. pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> { Some(match *self { - Abi::Scalar(s) => { + BackendRepr::Scalar(s) => { // No padding in scalars. s.size(cx) } - Abi::ScalarPair(s1, s2) => { + BackendRepr::ScalarPair(s1, s2) => { // May have some padding between the pair.
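// Worked example (editorial, not part of this patch): for s1 = i8 (size 1)
// and s2 = u32 (size 4, align 4), field2_offset = Size(1).align_to(4) = 4,
// so the pair's inherent size is (4 + 4).align_to(pair align = 4) = 8 bytes.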
let field2_offset = s1.size(cx).align_to(s2.align(cx).abi); (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi) } - Abi::Vector { element, count } => { + BackendRepr::Vector { element, count } => { // No padding in vectors, except possibly for trailing padding // to make the size a multiple of align (e.g. for vectors of size 3). (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi) } - Abi::Uninhabited | Abi::Aggregate { .. } => return None, + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None, }) } /// Discard validity range information and allow undef. pub fn to_union(&self) -> Self { match *self { - Abi::Scalar(s) => Abi::Scalar(s.to_union()), - Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()), - Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count }, - Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true }, + BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()), + BackendRepr::ScalarPair(s1, s2) => { + BackendRepr::ScalarPair(s1.to_union(), s2.to_union()) + } + BackendRepr::Vector { element, count } => { + BackendRepr::Vector { element: element.to_union(), count } + } + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => { + BackendRepr::Memory { sized: true } + } } } @@ -1454,12 +1472,12 @@ impl Abi { match (self, other) { // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges. // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x). - (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(), + (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(), ( - Abi::Vector { element: element_l, count: count_l }, - Abi::Vector { element: element_r, count: count_r }, + BackendRepr::Vector { element: element_l, count: count_l }, + BackendRepr::Vector { element: element_r, count: count_r }, ) => element_l.primitive() == element_r.primitive() && count_l == count_r, - (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => { + (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => { l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive() } // Everything else must be strictly identical. @@ -1616,14 +1634,14 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> { /// must be taken into account. pub variants: Variants<FieldIdx, VariantIdx>, - /// The `abi` defines how this data is passed between functions, and it defines - /// value restrictions via `valid_range`. + /// The `backend_repr` defines how this data will be represented to the codegen backend, + /// and encodes value restrictions via `valid_range`. /// /// Note that this is entirely orthogonal to the recursive structure defined by /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has - /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants` + /// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants` /// have to be taken into account to find all fields of this layout. - pub abi: Abi, + pub backend_repr: BackendRepr, /// The leaf scalar with the largest number of invalid values /// (i.e. outside of its `valid_range`), if it exists. @@ -1646,15 +1664,15 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { /// Returns `true` if this is an aggregate type (including a ScalarPair!) pub fn is_aggregate(&self) -> bool { - match self.abi { - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false, - Abi::ScalarPair(..) | Abi::Aggregate { ..
} => true, + match self.backend_repr { + BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false, + BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true, } } /// Returns `true` if this is an uninhabited type pub fn is_uninhabited(&self) -> bool { - self.abi.is_uninhabited() + self.backend_repr.is_uninhabited() } pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self { @@ -1664,7 +1682,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Primitive, - abi: Abi::Scalar(scalar), + backend_repr: BackendRepr::Scalar(scalar), largest_niche, size, align, @@ -1686,7 +1704,7 @@ where let LayoutData { size, align, - abi, + backend_repr, fields, largest_niche, variants, @@ -1696,7 +1714,7 @@ where f.debug_struct("Layout") .field("size", size) .field("align", align) - .field("abi", abi) + .field("abi", backend_repr) .field("fields", fields) .field("largest_niche", largest_niche) .field("variants", variants) @@ -1732,12 +1750,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { /// Returns `true` if the layout corresponds to an unsized type. #[inline] pub fn is_unsized(&self) -> bool { - self.abi.is_unsized() + self.backend_repr.is_unsized() } #[inline] pub fn is_sized(&self) -> bool { - self.abi.is_sized() + self.backend_repr.is_sized() } /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1). @@ -1750,10 +1768,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { /// Note that this does *not* imply that the type is irrelevant for layout! It can still have /// non-trivial alignment constraints. You probably want to use `is_1zst` instead. pub fn is_zst(&self) -> bool { - match self.abi { - Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false, - Abi::Uninhabited => self.size.bytes() == 0, - Abi::Aggregate { sized } => sized && self.size.bytes() == 0, + match self.backend_repr { + BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => { + false + } + BackendRepr::Uninhabited => self.size.bytes() == 0, + BackendRepr::Memory { sized } => sized && self.size.bytes() == 0, } } @@ -1768,8 +1788,8 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { // 2nd point is quite hard to check though.
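// Illustration (editorial, not part of this patch): `bool` and `u8` are both
// `Scalar(Int(I8, false))` and so compare equal up to validity; the extra
// `is_bool` comparison below still tells them apart, since codegen gives
// `bool` immediates the special `i1` treatment.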
self.size == other.size && self.is_sized() == other.is_sized() - && self.abi.eq_up_to_validity(&other.abi) - && self.abi.is_bool() == other.abi.is_bool() + && self.backend_repr.eq_up_to_validity(&other.backend_repr) + && self.backend_repr.is_bool() == other.backend_repr.is_bool() && self.align.abi == other.align.abi && self.max_repr_align == other.max_repr_align && self.unadjusted_abi_align == other.unadjusted_abi_align diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 8a1ee48c43c..855ca010611 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { match &self.ret.mode { PassMode::Direct(attrs) => { attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn); - if let abi::Abi::Scalar(scalar) = self.ret.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr { apply_range_attr(llvm::AttributePlace::ReturnValue, scalar); } } @@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } PassMode::Direct(attrs) => { let i = apply(attrs); - if let abi::Abi::Scalar(scalar) = arg.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr { apply_range_attr(llvm::AttributePlace::Argument(i), scalar); } } @@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { PassMode::Pair(a, b) => { let i = apply(a); let ii = apply(b); - if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi { + if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) = + arg.layout.backend_repr + { apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a); apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b); } @@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } if bx.cx.sess().opts.optimize != config::OptLevel::No && llvm_util::get_version() < (19, 0, 0) - && let abi::Abi::Scalar(scalar) = self.ret.layout.abi + && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr && matches!(scalar.primitive(), Int(..)) // If the value is a boolean, the range is 0..2 and that ultimately // become 0..0 when the type becomes i1, which would be rejected diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index 3c30822a2e2..53758967552 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>( ) -> &'ll Value { use InlineAsmRegClass::*; let dl = &bx.tcx.data_layout; - match (reg, layout.abi) { - (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + match (reg, layout.backend_repr) { + (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8); bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) @@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>( value } } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s)) if s.primitive() != Primitive::Float(Float::F128) => { let elem_ty = llvm_asm_scalar_type(bx.cx, s); @@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>( } bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + 
(AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); @@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>( let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } - (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { bx.bitcast(value, bx.cx.type_i64()) } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - Abi::Vector { .. }, + BackendRepr::Vector { .. }, ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)), ( X86( @@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if bx.sess().asm_arch == Some(InlineAsmArch::X86) && s.primitive() == Primitive::Float(Float::F128) => { @@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if s.primitive() == Primitive::Float(Float::F16) => { let value = bx.insert_element( bx.const_undef(bx.type_vector(bx.type_f16(), 8)), @@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Vector { element, count: count @ (8 | 16) }, + BackendRepr::Vector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } - (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { + ( + Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), + BackendRepr::Scalar(s), + ) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { bx.bitcast(value, bx.cx.type_f32()) } else { @@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) => { if let Primitive::Int(Integer::I64, _) = s.primitive() { bx.bitcast(value, bx.cx.type_f64()) @@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - Abi::Vector { element, count: count @ (4 | 8) }, + BackendRepr::Vector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } - (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. 
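// (Input direction: sub-register integers are zero-extended to the 32-bit
// register width here; `llvm_fixup_output` performs the matching truncation
// on the way back out.)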
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()), @@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>( _ => value, } } - (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) => { @@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>( instance: Instance<'_>, ) -> &'ll Value { use InlineAsmRegClass::*; - match (reg, layout.abi) { - (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + match (reg, layout.backend_repr) { + (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { bx.extract_element(value, bx.const_i32(0)) } else { value } } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s)) if s.primitive() != Primitive::Float(Float::F128) => { value = bx.extract_element(value, bx.const_i32(0)); @@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>( } value } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); @@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>( let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect(); bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices)) } - (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { bx.bitcast(value, bx.cx.type_f64()) } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - Abi::Vector { .. }, + BackendRepr::Vector { .. 
}, ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)), ( X86( @@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if bx.sess().asm_arch == Some(InlineAsmArch::X86) && s.primitive() == Primitive::Float(Float::F128) => { @@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if s.primitive() == Primitive::Float(Float::F16) => { let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8)); bx.extract_element(value, bx.const_usize(0)) @@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Vector { element, count: count @ (8 | 16) }, + BackendRepr::Vector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } - (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { + ( + Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), + BackendRepr::Scalar(s), + ) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { bx.bitcast(value, bx.cx.type_i32()) } else { @@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>( | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) => { if let Primitive::Int(Integer::I64, _) = s.primitive() { bx.bitcast(value, bx.cx.type_i64()) @@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - Abi::Vector { element, count: count @ (4 | 8) }, + BackendRepr::Vector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } - (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. 
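// (Output direction: the result comes back in a full register, so it is
// truncated to the declared i8/i16 width, mirroring the zero-extension done
// in `llvm_fixup_input`.)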
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()), @@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>( _ => value, } } - (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) => { @@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>( instance: Instance<'_>, ) -> &'ll Type { use InlineAsmRegClass::*; - match (reg, layout.abi) { - (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => { + match (reg, layout.backend_repr) { + (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => { if let Primitive::Int(Integer::I8, _) = s.primitive() { cx.type_vector(cx.type_i8(), 8) } else { layout.llvm_type(cx) } } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s)) if s.primitive() != Primitive::Float(Float::F128) => { let elem_ty = llvm_asm_scalar_type(cx, s); let count = 16 / layout.size.bytes(); cx.type_vector(elem_ty, count) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count }) + (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(cx, element); cx.type_vector(elem_ty, count * 2) } - (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s)) + (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F64) => { cx.type_i64() } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - Abi::Vector { .. }, + BackendRepr::Vector { .. }, ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8), ( X86( @@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if cx.sess().asm_arch == Some(InlineAsmArch::X86) && s.primitive() == Primitive::Float(Float::F128) => { @@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8), ( X86( @@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - Abi::Vector { element, count: count @ (8 | 16) }, + BackendRepr::Vector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } - (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => { + ( + Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), + BackendRepr::Scalar(s), + ) => { if let Primitive::Int(Integer::I32, _) = s.primitive() { cx.type_f32() } else { @@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | ArmInlineAsmRegClass::dreg_low8 | ArmInlineAsmRegClass::dreg_low16, ), - Abi::Scalar(s), + BackendRepr::Scalar(s), ) => { if let Primitive::Int(Integer::I64, _) = s.primitive() { cx.type_f64() @@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - Abi::Vector { element, count: count @ (4 | 8) }, + BackendRepr::Vector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => 
{ cx.type_vector(cx.type_i16(), count) } - (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => { + (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => { match s.primitive() { // MIPS only supports register-length arithmetics. Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(), @@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>( _ => layout.llvm_type(cx), } } - (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s)) + (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s)) if s.primitive() == Primitive::Float(Float::F16) && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) => { diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 8702532c36e..8e87869f946 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -545,13 +545,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } let llval = const_llval.unwrap_or_else(|| { let load = self.load(llty, place.val.llval, place.val.align); - if let abi::Abi::Scalar(scalar) = place.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr { scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO); } load }); OperandValue::Immediate(self.to_immediate(llval, place.layout)) - } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi { + } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr { let b_offset = a.size(self).align_to(b.align(self).abi); let mut load = |i, scalar: abi::Scalar, layout, align, offset| { diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index d04b5257619..c77e00aed9a 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()]) } sym::va_arg => { - match fn_abi.ret.layout.abi { - abi::Abi::Scalar(scalar) => { + match fn_abi.ret.layout.backend_repr { + abi::BackendRepr::Scalar(scalar) => { match scalar.primitive() { Primitive::Int(..) => { if self.cx().size_of(ret_ty).bytes() < 4 { @@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { } sym::raw_eq => { - use abi::Abi::*; + use abi::BackendRepr::*; let tp_ty = fn_args.type_at(0); let layout = self.layout_of(tp_ty).layout; - let use_integer_compare = match layout.abi() { + let use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, Uninhabited | Vector { .. } => false, - Aggregate { .. } => { + Memory { .. } => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), // so we re-use that same threshold here. @@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { } let llret_ty = if ret_ty.is_simd() - && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi + && let abi::BackendRepr::Memory { .. 
} = + self.layout_of(ret_ty).layout.backend_repr { let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx()); let elem_ll_ty = match elem_ty.kind() { diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index 6be4c3f034f..2b05e24a7ba 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -1,7 +1,7 @@ use std::fmt::Write; use rustc_abi::Primitive::{Float, Int, Pointer}; -use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants}; +use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants}; use rustc_codegen_ssa::traits::*; use rustc_middle::bug; use rustc_middle::ty::layout::{LayoutOf, TyAndLayout}; @@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>( layout: TyAndLayout<'tcx>, defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>, ) -> &'a Type { - match layout.abi { - Abi::Scalar(_) => bug!("handled elsewhere"), - Abi::Vector { element, count } => { + match layout.backend_repr { + BackendRepr::Scalar(_) => bug!("handled elsewhere"), + BackendRepr::Vector { element, count } => { let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } - Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {} + BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {} } let name = match layout.ty.kind() { @@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { - match self.abi { - Abi::Scalar(_) | Abi::Vector { .. } => true, - Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true, + BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => { + false + } } } fn is_llvm_scalar_pair(&self) -> bool { - match self.abi { - Abi::ScalarPair(..) => true, - Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false, + match self.backend_repr { + BackendRepr::ScalarPair(..) => true, + BackendRepr::Uninhabited + | BackendRepr::Scalar(_) + | BackendRepr::Vector { .. } + | BackendRepr::Memory { .. } => false, } } @@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! // In other words, this should generally not look at the type at all, but only at the // layout. - if let Abi::Scalar(scalar) = self.abi { + if let BackendRepr::Scalar(scalar) = self.backend_repr { // Use a different cache for scalars because pointers to DSTs // can be either wide or thin (data pointers of wide pointers). if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) { @@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { } fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { - match self.abi { - Abi::Scalar(scalar) => { + match self.backend_repr { + BackendRepr::Scalar(scalar) => { if scalar.is_bool() { return cx.type_i1(); } } - Abi::ScalarPair(..) => { + BackendRepr::ScalarPair(..) => { // An immediate pair always contains just the two elements, without any padding // filler, as it should never be stored to memory. return cx.type_struct( @@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { // This must produce the same result for `repr(transparent)` wrappers as for the inner type! 
// In other words, this should generally not look at the type at all, but only at the // layout. - let Abi::ScalarPair(a, b) = self.abi else { + let BackendRepr::ScalarPair(a, b) = self.backend_repr else { bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self); }; let scalar = [a, b][index]; diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index a17a127f014..283740fa664 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -1532,7 +1532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // the load would just produce `OperandValue::Ref` instead // of the `OperandValue::Immediate` we need for the call. llval = bx.load(bx.backend_type(arg.layout), llval, align); - if let abi::Abi::Scalar(scalar) = arg.layout.abi { + if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr { if scalar.is_bool() { bx.range_metadata(llval, WrappingRange { start: 0, end: 1 }); } diff --git a/compiler/rustc_codegen_ssa/src/mir/constant.rs b/compiler/rustc_codegen_ssa/src/mir/constant.rs index 15f45b226f5..54b9c9cc89f 100644 --- a/compiler/rustc_codegen_ssa/src/mir/constant.rs +++ b/compiler/rustc_codegen_ssa/src/mir/constant.rs @@ -1,8 +1,8 @@ +use rustc_abi::BackendRepr; use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::ty::layout::HasTyCtxt; use rustc_middle::ty::{self, Ty}; use rustc_middle::{bug, mir, span_bug}; -use rustc_target::abi::Abi; use super::FunctionCx; use crate::errors; @@ -86,7 +86,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { .map(|field| { if let Some(prim) = field.try_to_scalar() { let layout = bx.layout_of(field_ty); - let Abi::Scalar(scalar) = layout.abi else { + let BackendRepr::Scalar(scalar) = layout.backend_repr else { bug!("from_const: invalid ByVal layout: {:#?}", layout); }; bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout)) diff --git a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs index 146f55f95c2..21d20475408 100644 --- a/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs +++ b/compiler/rustc_codegen_ssa/src/mir/debuginfo.rs @@ -2,6 +2,7 @@ use std::collections::hash_map::Entry; use std::marker::PhantomData; use std::ops::Range; +use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx}; use rustc_data_structures::fx::FxHashMap; use rustc_index::IndexVec; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; @@ -11,7 +12,6 @@ use rustc_middle::{bug, mir, ty}; use rustc_session::config::DebugInfo; use rustc_span::symbol::{Symbol, kw}; use rustc_span::{BytePos, Span, hygiene}; -use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx}; use super::operand::{OperandRef, OperandValue}; use super::place::{PlaceRef, PlaceValue}; @@ -510,7 +510,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // be marked as a `LocalVariable` for MSVC debuggers to visualize // their data correctly. 
(See #81894 & #88625) let var_ty_layout = self.cx.layout_of(var_ty); - if let Abi::ScalarPair(_, _) = var_ty_layout.abi { + if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr { VariableKind::LocalVariable } else { VariableKind::ArgumentVariable(arg_index) diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 88ceff327d0..19101ec2d1b 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -4,7 +4,7 @@ use std::fmt; use arrayvec::ArrayVec; use either::Either; use rustc_abi as abi; -use rustc_abi::{Abi, Align, Size}; +use rustc_abi::{Align, BackendRepr, Size}; use rustc_middle::bug; use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range}; use rustc_middle::mir::{self, ConstValue}; @@ -163,7 +163,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let val = match val { ConstValue::Scalar(x) => { - let Abi::Scalar(scalar) = layout.abi else { + let BackendRepr::Scalar(scalar) = layout.backend_repr else { bug!("from_const: invalid ByVal layout: {:#?}", layout); }; let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout)); @@ -171,7 +171,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { } ConstValue::ZeroSized => return OperandRef::zero_sized(layout), ConstValue::Slice { data, meta } => { - let Abi::ScalarPair(a_scalar, _) = layout.abi else { + let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else { bug!("from_const: invalid ScalarPair layout: {:#?}", layout); }; let a = Scalar::from_pointer( @@ -221,14 +221,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { // case where some of the bytes are initialized and others are not. So, we need an extra // check that walks over the type of `mplace` to make sure it is truly correct to treat this // like a `Scalar` (or `ScalarPair`). - match layout.abi { - Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => { + match layout.backend_repr { + BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => { let size = s.size(bx); assert_eq!(size, layout.size, "abi::Scalar size does not match layout size"); let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout)); OperandRef { val: OperandValue::Immediate(val), layout } } - Abi::ScalarPair( + BackendRepr::ScalarPair( a @ abi::Scalar::Initialized { .. }, b @ abi::Scalar::Initialized { .. }, ) => { @@ -322,7 +322,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { llval: V, layout: TyAndLayout<'tcx>, ) -> Self { - let val = if let Abi::ScalarPair(..) = layout.abi { + let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); // Deconstruct the immediate aggregate. @@ -343,7 +343,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let field = self.layout.field(bx.cx(), i); let offset = self.layout.fields.offset(i); - let mut val = match (self.val, self.layout.abi) { + let mut val = match (self.val, self.layout.backend_repr) { // If the field is ZST, it has no data. _ if field.is_zst() => OperandValue::ZeroSized, @@ -356,7 +356,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { } // Extract a scalar component from a pair. 
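// Illustration (editorial, not part of this patch): for a `&[u8]` operand the
// repr is `ScalarPair(data_ptr, len)`; projecting the field at offset 0 yields
// `a_llval`, while the length at `b_offset` yields `b_llval`, as checked below.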
- (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => { + (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => { if offset.bytes() == 0 { assert_eq!(field.size, a.size(bx.cx())); OperandValue::Immediate(a_llval) @@ -368,30 +368,30 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { } // `#[repr(simd)]` types are also immediate. - (OperandValue::Immediate(llval), Abi::Vector { .. }) => { + (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => { OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self), }; - match (&mut val, field.abi) { + match (&mut val, field.backend_repr) { (OperandValue::ZeroSized, _) => {} ( OperandValue::Immediate(llval), - Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. }, + BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. }, ) => { // Bools in union fields needs to be truncated. *llval = bx.to_immediate(*llval, field); } - (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => { + (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => { // Bools in union fields needs to be truncated. *a = bx.to_immediate_scalar(*a, a_abi); *b = bx.to_immediate_scalar(*b, b_abi); } // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]); - (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => { - assert_matches!(self.layout.abi, Abi::Vector { .. }); + (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => { + assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. }); let llfield_ty = bx.cx().backend_type(field); @@ -400,7 +400,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { bx.store(*llval, llptr, field.align.abi); *llval = bx.load(llfield_ty, llptr, field.align.abi); } - (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => { + ( + OperandValue::Immediate(_), + BackendRepr::Uninhabited | BackendRepr::Memory { sized: false }, + ) => { bug!() } (OperandValue::Pair(..), _) => bug!(), @@ -494,7 +497,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { bx.store_with_flags(val, dest.val.llval, dest.val.align, flags); } OperandValue::Pair(a, b) => { - let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else { + let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else { bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout); }; let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi); @@ -645,7 +648,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // However, some SIMD types do not actually use the vector ABI // (in particular, packed SIMD types do not). Ensure we exclude those. let layout = bx.layout_of(constant_ty); - if let Abi::Vector { .. } = layout.abi { + if let BackendRepr::Vector { .. 
} = layout.backend_repr { let (llval, ty) = self.immediate_const_vector(bx, constant); return OperandRef { val: OperandValue::Immediate(llval), diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 6e8c193cd75..86cf0f9614d 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -1136,17 +1136,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { OperandValueKind::ZeroSized } else if self.cx.is_backend_immediate(layout) { assert!(!self.cx.is_backend_scalar_pair(layout)); - OperandValueKind::Immediate(match layout.abi { - abi::Abi::Scalar(s) => s, - abi::Abi::Vector { element, .. } => element, + OperandValueKind::Immediate(match layout.backend_repr { + abi::BackendRepr::Scalar(s) => s, + abi::BackendRepr::Vector { element, .. } => element, x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"), }) } else if self.cx.is_backend_scalar_pair(layout) { - let abi::Abi::ScalarPair(s1, s2) = layout.abi else { + let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else { span_bug!( self.mir.span, "Couldn't translate {:?} as backend scalar pair", - layout.abi, + layout.backend_repr, ); }; OperandValueKind::Pair(s1, s2) diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 50a51714146..768a0439ab5 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -1,13 +1,13 @@ use std::assert_matches::assert_matches; use std::ops::Deref; +use rustc_abi::{Align, BackendRepr, Scalar, Size, WrappingRange}; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout}; use rustc_middle::ty::{Instance, Ty}; use rustc_session::config::OptLevel; use rustc_span::Span; use rustc_target::abi::call::FnAbi; -use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange}; use super::abi::AbiBuilderMethods; use super::asm::AsmBuilderMethods; @@ -162,7 +162,7 @@ pub trait BuilderMethods<'a, 'tcx>: fn from_immediate(&mut self, val: Self::Value) -> Self::Value; fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value { - if let Abi::Scalar(scalar) = layout.abi { + if let BackendRepr::Scalar(scalar) = layout.backend_repr { self.to_immediate_scalar(val, scalar) } else { val diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs index 743924faa21..bc2661c4fc7 100644 --- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs @@ -131,7 +131,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine { interp_ok(match bin_op { Eq | Ne | Lt | Le | Gt | Ge => { // Types can differ, e.g. fn ptrs with different `for`. - assert_eq!(left.layout.abi, right.layout.abi); + assert_eq!(left.layout.backend_repr, right.layout.backend_repr); let size = ecx.pointer_size(); // Just compare the bits. ScalarPairs are compared lexicographically. // We thus always compare pairs and simply fill scalars up with 0. 
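The hunks above all follow the same mechanical substitution: `Abi` becomes `BackendRepr`, `Aggregate { .. }` becomes `Memory { .. }`, and `layout.abi` becomes `layout.backend_repr`. As a rough sketch of the post-rename idiom for downstream code (the free function here is hypothetical; the match mirrors `is_llvm_immediate` in `type_of.rs` above):

    use rustc_abi::BackendRepr;

    // Values classified here stay SSA immediates in the backend;
    // everything else is handled through memory.
    fn handled_as_immediate(repr: BackendRepr) -> bool {
        match repr {
            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
            BackendRepr::Uninhabited
            | BackendRepr::ScalarPair(..)
            | BackendRepr::Memory { .. } => false,
        }
    }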
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 7319c251bbd..81b9d73b952 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -1,6 +1,7 @@
 use std::sync::atomic::Ordering::Relaxed;
 
 use either::{Left, Right};
+use rustc_abi::{self as abi, BackendRepr};
 use rustc_hir::def::DefKind;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
@@ -12,7 +13,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{self, Abi};
 use tracing::{debug, instrument, trace};
 
 use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
@@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>(
     // type (it's used throughout the compiler and having it work just on literals is not enough)
     // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
     // from its byte-serialized form).
-    let force_as_immediate = match op.layout.abi {
-        Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+    let force_as_immediate = match op.layout.backend_repr {
+        BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true,
         // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
         // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
         // not have to generate any duplicate allocations (we preserve the original `AllocId` in
diff --git a/compiler/rustc_const_eval/src/const_eval/valtrees.rs b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
index 9e80e666ba9..ea88b2ed22e 100644
--- a/compiler/rustc_const_eval/src/const_eval/valtrees.rs
+++ b/compiler/rustc_const_eval/src/const_eval/valtrees.rs
@@ -1,10 +1,10 @@
+use rustc_abi::{BackendRepr, VariantIdx};
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, VariantIdx};
 use tracing::{debug, instrument, trace};
 
 use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
@@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>(
             let val = ecx.read_immediate(place).unwrap();
             // We could allow wide raw pointers where both sides are integers in the future,
             // but for now we reject them.
-            if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+            if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
                 return Err(ValTreeCreationError::NonSupportedType(ty));
             }
             let val = val.to_scalar();
@@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>(
                 // Fast path to avoid some allocations.
                 return mir::ConstValue::ZeroSized;
             }
-            if layout.abi.is_scalar()
+            if layout.backend_repr.is_scalar()
                 && (matches!(ty.kind(), ty::Tuple(_))
                     || matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
             {
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
index 85d99900c6c..1915bf75c95 100644
--- a/compiler/rustc_const_eval/src/interpret/call.rs
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -172,8 +172,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // must be compatible. So we just accept everything with Pointer ABI as compatible,
         // even if this will accept some code that is not stably guaranteed to work.
         // This also handles function pointers.
-        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
-            abi::Abi::Scalar(s) => match s.primitive() {
+        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
+            abi::BackendRepr::Scalar(s) => match s.primitive() {
                 abi::Primitive::Pointer(addr_space) => Some(addr_space),
                 _ => None,
             },
diff --git a/compiler/rustc_const_eval/src/interpret/cast.rs b/compiler/rustc_const_eval/src/interpret/cast.rs
index 64b15611316..60d5e904bd9 100644
--- a/compiler/rustc_const_eval/src/interpret/cast.rs
+++ b/compiler/rustc_const_eval/src/interpret/cast.rs
@@ -274,7 +274,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         cast_ty: Ty<'tcx>,
     ) -> InterpResult<'tcx, Scalar> {
         // Let's make sure v is sign-extended *if* it has a signed type.
-        let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+        let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`.
 
         let v = match src_layout.ty.kind() {
             Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?,
diff --git a/compiler/rustc_const_eval/src/interpret/discriminant.rs b/compiler/rustc_const_eval/src/interpret/discriminant.rs
index feed0860679..bb4ac9556ea 100644
--- a/compiler/rustc_const_eval/src/interpret/discriminant.rs
+++ b/compiler/rustc_const_eval/src/interpret/discriminant.rs
@@ -112,7 +112,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // Read tag and sanity-check `tag_layout`.
         let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
         assert_eq!(tag_layout.size, tag_val.layout.size);
-        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
         trace!("tag value: {}", tag_val);
 
         // Figure out which discriminant and variant this corresponds to.
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 6148123bdfe..80e14ee887c 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -563,7 +563,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
         interp_ok(if overflowed.to_bool()? {
             let size = l.layout.size;
-            if l.layout.abi.is_signed() {
+            if l.layout.backend_repr.is_signed() {
                 // For signed ints the saturated value depends on the sign of the first
                 // term since the sign of the second term can be inferred from this and
                 // the fact that the operation has overflowed (if either is 0 no
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index cd5e2aeca85..43ae98e74b0 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -5,7 +5,7 @@ use std::assert_matches::assert_matches;
 
 use either::{Either, Left, Right};
 use rustc_abi as abi;
-use rustc_abi::{Abi, HasDataLayout, Size};
+use rustc_abi::{BackendRepr, HasDataLayout, Size};
 use rustc_hir::def::Namespace;
 use rustc_middle::mir::interpret::ScalarSizeMismatch;
 use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
@@ -114,9 +114,9 @@ impl Immediate {
     }
 
     /// Assert that this immediate is a valid value for the given ABI.
-    pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
+    pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) {
         match (self, abi) {
-            (Immediate::Scalar(scalar), Abi::Scalar(s)) => {
+            (Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => {
                 assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
                 if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
                     // This is not a pointer, it should not carry provenance.
@@ -126,7 +126,7 @@ impl Immediate {
                     );
                 }
             }
-            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+            (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
                 assert_eq!(
                     a_val.size(),
                     a.size(cx),
@@ -244,7 +244,7 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
 impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar(val: Scalar, layout: TyAndLayout<'tcx>) -> Self {
-        debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
+        debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
         debug_assert_eq!(val.size(), layout.size);
         ImmTy { imm: val.into(), layout }
     }
@@ -252,7 +252,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     #[inline]
     pub fn from_scalar_pair(a: Scalar, b: Scalar, layout: TyAndLayout<'tcx>) -> Self {
         debug_assert!(
-            matches!(layout.abi, Abi::ScalarPair(..)),
+            matches!(layout.backend_repr, BackendRepr::ScalarPair(..)),
             "`ImmTy::from_scalar_pair` on non-scalar-pair layout"
         );
         let imm = Immediate::ScalarPair(a, b);
@@ -263,9 +263,9 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     pub fn from_immediate(imm: Immediate, layout: TyAndLayout<'tcx>) -> Self {
         // Without a `cx` we cannot call `assert_matches_abi`.
         debug_assert!(
-            match (imm, layout.abi) {
-                (Immediate::Scalar(..), Abi::Scalar(..)) => true,
-                (Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
+            match (imm, layout.backend_repr) {
+                (Immediate::Scalar(..), BackendRepr::Scalar(..)) => true,
+                (Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true,
                 (Immediate::Uninit, _) if layout.is_sized() => true,
                 _ => false,
             },
@@ -356,7 +356,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
     fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
         // Verify that the input matches its type.
         if cfg!(debug_assertions) {
-            self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx);
+            self.assert_matches_abi(
+                self.layout.backend_repr,
+                "invalid input to Immediate::offset",
+                cx,
+            );
         }
         // `ImmTy` has already been checked to be in-bounds, so we can just check directly if this
        // remains in-bounds. This cannot actually be violated since projections are type-checked
@@ -370,19 +374,19 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
         );
         // This makes several assumptions about what layouts we will encounter; we match what
         // codegen does as well as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
-        let inner_val: Immediate<_> = match (**self, self.layout.abi) {
+        let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) {
             // If the entire value is uninit, then so is the field (can happen in ConstProp).
             (Immediate::Uninit, _) => Immediate::Uninit,
             // If the field is uninhabited, we can forget the data (can happen in ConstProp).
             // `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
             // has an `Uninhabited` variant, which means this case is possible.
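A minimal sketch of the shape invariant this assertion family enforces (simplified enums invented purely for illustration): each `Immediate` shape must line up with the corresponding repr variant, with `Uninit` allowed anywhere sized.

    // Simplified shapes; not the compiler's actual definitions.
    enum Imm { Uninit, Scalar(u128), Pair(u128, u128) }
    enum Repr { Scalar, ScalarPair, Memory }

    fn shape_matches(imm: &Imm, repr: &Repr) -> bool {
        matches!(
            (imm, repr),
            (Imm::Scalar(_), Repr::Scalar)
                | (Imm::Pair(..), Repr::ScalarPair)
                | (Imm::Uninit, _)
        )
    }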
-            _ if layout.abi.is_uninhabited() => Immediate::Uninit,
+            _ if layout.is_uninhabited() => Immediate::Uninit,
             // the field contains no information, can be left uninit
             // (Scalar/ScalarPair can even contain an aligned ZST, not just a 1-ZST)
             _ if layout.is_zst() => Immediate::Uninit,
             // some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
             // to detect those here and also give them no data
-            _ if matches!(layout.abi, Abi::Aggregate { .. })
+            _ if matches!(layout.backend_repr, BackendRepr::Memory { .. })
                 && matches!(layout.variants, abi::Variants::Single { .. })
                 && matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
             {
@@ -394,7 +398,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
                 **self
             }
             // extract fields from types with `ScalarPair` ABI
-            (Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
+            (Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
                 Immediate::from(if offset.bytes() == 0 {
                     a_val
                 } else {
@@ -411,7 +415,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
             ),
         };
         // Ensure the new layout matches the new value.
-        inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx);
+        inner_val.assert_matches_abi(
+            layout.backend_repr,
+            "invalid field type in Immediate::offset",
+            cx,
+        );
 
         ImmTy::from_immediate(inner_val, layout)
     }
@@ -567,8 +575,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // case where some of the bytes are initialized and others are not. So, we need an extra
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
-        interp_ok(match mplace.layout.abi {
-            Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
+        interp_ok(match mplace.layout.backend_repr {
+            BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
                 let size = s.size(self);
                 assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
                 let scalar = alloc.read_scalar(
@@ -577,7 +585,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 )?;
                 Some(ImmTy::from_scalar(scalar, mplace.layout))
             }
-            Abi::ScalarPair(
+            BackendRepr::ScalarPair(
                 abi::Scalar::Initialized { value: a, .. },
                 abi::Scalar::Initialized { value: b, .. },
             ) => {
@@ -637,9 +645,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         op: &impl Projectable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
         if !matches!(
-            op.layout().abi,
-            Abi::Scalar(abi::Scalar::Initialized { .. })
-                | Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
+            op.layout().backend_repr,
+            BackendRepr::Scalar(abi::Scalar::Initialized { .. })
+                | BackendRepr::ScalarPair(
+                    abi::Scalar::Initialized { .. },
+                    abi::Scalar::Initialized { .. }
+                )
         ) {
             span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
         }
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index 380db907481..cf280e0c1ae 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -114,7 +114,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let l_bits = left.layout.size.bits();
         // Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
         // the one MIR operator that does *not* directly map to a single LLVM operation.)
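The shift-amount masking that the hunk below implements can be sketched in plain arithmetic (standalone sketch, not the interpreter's code): `rem_euclid` always yields a non-negative remainder, so even a negative right operand lands in `0..bits`.

    fn wrap_shift_amount(shift: i128, bits: u32) -> u32 {
        // `rem_euclid` is guaranteed non-negative and < bits here.
        let rem = shift.rem_euclid(i128::from(bits));
        u32::try_from(rem).unwrap() // in 0..bits, so this cannot fail
    }

    fn main() {
        assert_eq!(wrap_shift_amount(-1, 32), 31); // x << -1 wraps to x << 31
        assert_eq!(wrap_shift_amount(33, 32), 1);
    }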
-        let (shift_amount, overflow) = if right.layout.abi.is_signed() {
+        let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
             let shift_amount = r_signed();
             let rem = shift_amount.rem_euclid(l_bits.into());
             // `rem` is guaranteed positive, so the `unwrap` cannot fail
@@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         };
         let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this into the range `0..size` so this will always fit
 
         // Compute the shifted result.
-        let result = if left.layout.abi.is_signed() {
+        let result = if left.layout.backend_repr.is_signed() {
             let l = l_signed();
             let result = match bin_op {
                 Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
@@ -147,7 +147,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         if overflow && let Some(intrinsic) = throw_ub_on_overflow {
             throw_ub!(ShiftOverflow {
                 intrinsic,
-                shift_amount: if right.layout.abi.is_signed() {
+                shift_amount: if right.layout.backend_repr.is_signed() {
                     Either::Right(r_signed())
                 } else {
                     Either::Left(r_unsigned())
@@ -171,7 +171,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let size = left.layout.size;
 
         // Operations that need special treatment for signed integers
-        if left.layout.abi.is_signed() {
+        if left.layout.backend_repr.is_signed() {
             let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                 Lt => Some(i128::lt),
                 Le => Some(i128::le),
@@ -250,7 +250,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             BitXor => ImmTy::from_uint(l ^ r, left.layout),
 
             _ => {
-                assert!(!left.layout.abi.is_signed());
+                assert!(!left.layout.backend_repr.is_signed());
                 let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                     Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
                     Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
@@ -332,7 +332,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         }
 
         let offset_bytes = val.to_target_isize(self)?;
-        if !right.layout.abi.is_signed() && offset_bytes < 0 {
+        if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
             // We were supposed to do an unsigned offset but the result is negative -- this
             // can only mean that the cast wrapped around.
             throw_ub!(PointerArithOverflow)
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 81b926a1b65..139a1db60e0 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -5,11 +5,11 @@ use std::assert_matches::assert_matches;
 
 use either::{Either, Left, Right};
+use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
 use rustc_ast::Mutability;
 use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
 use tracing::{instrument, trace};
 
 use super::{
@@ -659,7 +659,7 @@ where
             // Unfortunately this is too expensive to do in release builds.
             if cfg!(debug_assertions) {
                 src.assert_matches_abi(
-                    local_layout.abi,
+                    local_layout.backend_repr,
                     "invalid immediate for given destination place",
                     self,
                 );
@@ -683,7 +683,11 @@ where
     ) -> InterpResult<'tcx> {
         // We use the sizes from `value` below.
         // Ensure that it matches the type of the place it is written to.
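The pointer-offset guard in the hunk above has a simple standalone rendering (assumed simplification, invented names): an offset declared unsigned that shows up negative after the cast to a target-sized integer can only mean the value wrapped around.

    fn check_offset(offset_bytes: i64, offset_is_signed: bool) -> Result<i64, &'static str> {
        // An unsigned offset must never be observed as negative.
        if !offset_is_signed && offset_bytes < 0 {
            return Err("pointer arithmetic overflow");
        }
        Ok(offset_bytes)
    }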
-        value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self);
+        value.assert_matches_abi(
+            layout.backend_repr,
+            "invalid immediate for given destination place",
+            self,
+        );
         // Note that it is really important that the type here is the right one, and matches the
         // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
@@ -700,7 +704,7 @@ where
                 alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
-                let Abi::ScalarPair(a, b) = layout.abi else {
+                let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
                     span_bug!(
                         self.cur_span(),
                         "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 8b5bb1332e7..cd2c1ef3613 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -11,6 +11,10 @@ use std::num::NonZero;
 
 use either::{Left, Right};
 use hir::def::DefKind;
+use rustc_abi::{
+    BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants,
+    WrappingRange,
+};
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
@@ -23,9 +27,6 @@ use rustc_middle::mir::interpret::{
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
 use rustc_span::symbol::{Symbol, sym};
-use rustc_target::abi::{
-    Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
-};
 use tracing::trace;
 
 use super::machine::AllocMap;
@@ -422,7 +423,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
         // Reset provenance: ensure slice tail metadata does not preserve provenance,
         // and ensure all pointers do not preserve partial provenance.
         if self.reset_provenance_and_padding {
-            if matches!(imm.layout.abi, Abi::Scalar(..)) {
+            if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) {
                 // A thin pointer. If it has provenance, we don't have to do anything.
                 // If it does not, ensure we clear the provenance in memory.
                 if matches!(imm.to_scalar(), Scalar::Int(..)) {
@@ -981,7 +982,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
             let elem = layout.field(cx, 0);
 
             // Fast-path for large arrays of simple types that do not contain any padding.
-            if elem.abi.is_scalar() {
+            if elem.backend_repr.is_scalar() {
                 out.add_range(base_offset, elem.size * count);
             } else {
                 for idx in 0..count {
@@ -1299,19 +1300,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
         // FIXME: We could avoid some redundant checks here. For newtypes wrapping
         // scalars, we do the same check on every "level" (e.g., first we check
         // MyNewtype and then the scalar in there).
-        match val.layout.abi {
-            Abi::Uninhabited => {
+        match val.layout.backend_repr {
+            BackendRepr::Uninhabited => {
                 let ty = val.layout.ty;
                 throw_validation_failure!(self.path, UninhabitedVal { ty });
             }
-            Abi::Scalar(scalar_layout) => {
+            BackendRepr::Scalar(scalar_layout) => {
                 if !scalar_layout.is_uninit_valid() {
                     // There is something to check here.
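One detail worth spelling out for the `ScalarPair` write path above: the second half of a pair is stored after the first, rounded up to the second scalar's alignment. A toy version of that offset computation (names invented here, power-of-two alignment assumed):

    // Hypothetical helper: where the second scalar of a pair lands in memory.
    fn second_field_offset(a_size: u64, b_align: u64) -> u64 {
        a_size.div_ceil(b_align) * b_align // i.e. a_size aligned up to b_align
    }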
                     let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
                     self.visit_scalar(scalar, scalar_layout)?;
                 }
             }
-            Abi::ScalarPair(a_layout, b_layout) => {
+            BackendRepr::ScalarPair(a_layout, b_layout) => {
                 // We can only proceed if *both* scalars need to be initialized.
                 // FIXME: find a way to also check ScalarPair when one side can be uninit but
                 // the other must be init.
@@ -1322,12 +1323,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                     self.visit_scalar(b, b_layout)?;
                 }
             }
-            Abi::Vector { .. } => {
+            BackendRepr::Vector { .. } => {
                 // No checks here, we assume layout computation gets this right.
                 // (This is harder to check since Miri does not represent these as `Immediate`. We
                 // also cannot use field projections since this might be a newtype around a vector.)
             }
-            Abi::Aggregate { .. } => {
+            BackendRepr::Memory { .. } => {
                 // Nothing to do.
             }
         }
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 7a8b976dfc4..f743525f359 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -1,9 +1,9 @@
+use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants};
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{
     HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
 };
 use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
-use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
 
 use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
 use crate::interpret::{InterpCx, MemoryKind};
@@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>(
     };
 
     // Check the ABI.
-    let valid = match this.abi {
-        Abi::Uninhabited => false, // definitely UB
-        Abi::Scalar(s) => scalar_allows_raw_init(s),
-        Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
-        Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
-        Abi::Aggregate { .. } => true, // Fields are checked below.
+    let valid = match this.backend_repr {
+        BackendRepr::Uninhabited => false, // definitely UB
+        BackendRepr::Scalar(s) => scalar_allows_raw_init(s),
+        BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+        BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+        BackendRepr::Memory { .. } => true, // Fields are checked below.
     };
     if !valid {
         // This is definitely not okay.
diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs
index dbb8c667532..6400685101b 100644
--- a/compiler/rustc_lint/src/builtin.rs
+++ b/compiler/rustc_lint/src/builtin.rs
@@ -16,6 +16,7 @@ use std::fmt::Write;
 
 use ast::token::TokenKind;
+use rustc_abi::BackendRepr;
 use rustc_ast::tokenstream::{TokenStream, TokenTree};
 use rustc_ast::visit::{FnCtxt, FnKind};
 use rustc_ast::{self as ast, *};
@@ -40,7 +41,6 @@ use rustc_span::edition::Edition;
 use rustc_span::source_map::Spanned;
 use rustc_span::symbol::{Ident, Symbol, kw, sym};
 use rustc_span::{BytePos, InnerSpan, Span};
-use rustc_target::abi::Abi;
 use rustc_target::asm::InlineAsmArch;
 use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt};
 use rustc_trait_selection::traits::misc::type_allowed_to_implement_copy;
@@ -2466,7 +2466,9 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {
 
                 // Check if this ADT has a constrained layout (like `NonNull` and friends).
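The "lax" raw-init check rewritten above is essentially a total function over the five repr variants. A toy, self-contained version with the same decision table (invented types, not the compiler's):

    enum Repr {
        Uninhabited,
        Scalar { allows_raw_init: bool },
        ScalarPair { a_ok: bool, b_ok: bool },
        Vector { elem_ok: bool, count: u64 },
        Memory,
    }

    fn allows_raw_init(r: &Repr) -> bool {
        match *r {
            Repr::Uninhabited => false, // definitely UB
            Repr::Scalar { allows_raw_init } => allows_raw_init,
            Repr::ScalarPair { a_ok, b_ok } => a_ok && b_ok,
            Repr::Vector { elem_ok, count } => count == 0 || elem_ok,
            Repr::Memory => true, // fields are checked separately
        }
    }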
                 if let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(ty)) {
-                    if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &layout.abi {
+                    if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+                        &layout.backend_repr
+                    {
                         let range = scalar.valid_range(cx);
                         let msg = if !range.contains(0) {
                             "must be non-null"
diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs
index abe4e3e78ee..394ea798d3e 100644
--- a/compiler/rustc_lint/src/foreign_modules.rs
+++ b/compiler/rustc_lint/src/foreign_modules.rs
@@ -217,7 +217,7 @@ fn structurally_same_type<'tcx>(
         // `extern` blocks cannot be generic, so we'll always get a layout here.
         let a_layout = tcx.layout_of(param_env.and(a)).unwrap();
         let b_layout = tcx.layout_of(param_env.and(b)).unwrap();
-        assert_eq!(a_layout.abi, b_layout.abi);
+        assert_eq!(a_layout.backend_repr, b_layout.backend_repr);
         assert_eq!(a_layout.size, b_layout.size);
         assert_eq!(a_layout.align, b_layout.align);
     }
diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs
index 0751d35cb9c..88878a018e7 100644
--- a/compiler/rustc_lint/src/types.rs
+++ b/compiler/rustc_lint/src/types.rs
@@ -1,6 +1,7 @@
 use std::iter;
 use std::ops::ControlFlow;
 
+use rustc_abi::{BackendRepr, TagEncoding, Variants, WrappingRange};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_errors::DiagMessage;
 use rustc_hir::{Expr, ExprKind};
@@ -13,7 +14,6 @@ use rustc_session::{declare_lint, declare_lint_pass, impl_lint_pass};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::symbol::sym;
 use rustc_span::{Span, Symbol, source_map};
-use rustc_target::abi::{Abi, TagEncoding, Variants, WrappingRange};
 use rustc_target::spec::abi::Abi as SpecAbi;
 use tracing::debug;
 use {rustc_ast as ast, rustc_hir as hir};
@@ -776,8 +776,8 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
                 bug!("should be able to compute the layout of non-polymorphic type");
             }
 
-            let field_ty_abi = &field_ty_layout.ok()?.abi;
-            if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
+            let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
+            if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
                 match field_ty_scalar.valid_range(&tcx) {
                     WrappingRange { start: 0, end }
                         if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 2c7a3ffd04c..0560ffe058a 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -4,8 +4,9 @@ use std::{cmp, fmt};
 
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    Abi, AddressSpace, Align, FieldsShape, HasDataLayout, Integer, LayoutCalculator, LayoutData,
-    PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, Variants,
+    AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutCalculator,
+    LayoutData, PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
+    Variants,
 };
 use rustc_error_messages::DiagMessage;
 use rustc_errors::{
@@ -757,7 +758,7 @@ where
                 Some(fields) => FieldsShape::Union(fields),
                 None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
             },
-            abi: Abi::Uninhabited,
+            backend_repr: BackendRepr::Uninhabited,
             largest_niche: None,
             align: tcx.data_layout.i8_align,
             size: Size::ZERO,
diff --git a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
index fd949a53384..2357dd73490 100644
--- a/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
+++ b/compiler/rustc_mir_build/src/build/expr/as_rvalue.rs
@@ -13,7 +13,7 @@ use rustc_middle::ty::util::IntTypeExt;
 use rustc_middle::ty::{self, Ty, UpvarArgs};
 use rustc_span::source_map::Spanned;
 use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{Abi, FieldIdx, Primitive};
+use rustc_target::abi::{BackendRepr, FieldIdx, Primitive};
 use tracing::debug;
 
 use crate::build::expr::as_place::PlaceBase;
@@ -207,7 +207,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
             );
 
             let (op, ty) = (Operand::Move(discr), discr_ty);
 
-            if let Abi::Scalar(scalar) = layout.unwrap().abi
+            if let BackendRepr::Scalar(scalar) = layout.unwrap().backend_repr
                 && !scalar.is_always_valid(&this.tcx)
                 && let Primitive::Int(int_width, _signed) = scalar.primitive()
             {
diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs
index f7d4a082779..80151b8ba2d 100644
--- a/compiler/rustc_mir_dataflow/src/value_analysis.rs
+++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs
@@ -858,7 +858,7 @@ impl<'tcx> Map<'tcx> {
         // Allocate a value slot if it doesn't have one, and the user requested one.
         assert!(place_info.value_index.is_none());
         if let Ok(layout) = tcx.layout_of(param_env.and(place_info.ty))
-            && layout.abi.is_scalar()
+            && layout.backend_repr.is_scalar()
         {
             place_info.value_index = Some(self.value_count.into());
             self.value_count += 1;
diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
index 002216f50f2..ca24d0d7e70 100644
--- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
+++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs
@@ -2,6 +2,7 @@
 //!
 //! Currently, this pass only propagates scalar values.
 
+use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
 use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
 use rustc_const_eval::interpret::{
     ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok,
@@ -20,7 +21,6 @@ use rustc_mir_dataflow::value_analysis::{
 };
 use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor};
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
 use tracing::{debug, debug_span, instrument};
 
 // These constants are somewhat random guesses and have not been optimized.
@@ -457,7 +457,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
                 // a pair and sometimes not. But as a hack we always return a pair
                 // and just make the 2nd component `Bottom` when it does not exist.
                 Some(val) => {
-                    if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                    if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
                         let (val, overflow) = val.to_scalar_pair();
                         (FlatSet::Elem(val), FlatSet::Elem(overflow))
                     } else {
@@ -470,7 +470,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
             // Exactly one side is known, attempt some algebraic simplifications.
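The "always return a pair, make the 2nd component Bottom" hack described above can be sketched standalone (toy `FlatSet`, invented signature; the real lattice lives in rustc_mir_dataflow):

    enum FlatSet<T> { Bottom, Elem(T), Top }

    // Checked ops produce (value, overflow); when the result is not actually
    // a pair we still return a pair, with the overflow half "bottom".
    fn to_pair(result_is_pair: bool, val: u128, overflow: bool) -> (FlatSet<u128>, FlatSet<bool>) {
        if result_is_pair {
            (FlatSet::Elem(val), FlatSet::Elem(overflow))
        } else {
            (FlatSet::Elem(val), FlatSet::Bottom)
        }
    }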
             (FlatSet::Elem(const_arg), _) | (_, FlatSet::Elem(const_arg)) => {
                 let layout = const_arg.layout;
-                if !matches!(layout.abi, rustc_target::abi::Abi::Scalar(..)) {
+                if !matches!(layout.backend_repr, rustc_target::abi::BackendRepr::Scalar(..)) {
                     return (FlatSet::Top, FlatSet::Top);
                 }
 
@@ -589,13 +589,13 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
         }
         let place = map.find(place.as_ref())?;
-        if layout.abi.is_scalar()
+        if layout.backend_repr.is_scalar()
             && let Some(value) = propagatable_scalar(place, state, map)
         {
             return Some(Const::Val(ConstValue::Scalar(value), ty));
         }
 
-        if matches!(layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+        if matches!(layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
             let alloc_id = ecx
                 .intern_with_temp_alloc(layout, |ecx, dest| {
                     try_write_constant(ecx, dest, place, ty, state, map)
@@ -641,7 +641,7 @@ fn try_write_constant<'tcx>(
     }
 
     // Fast path for scalars.
-    if layout.abi.is_scalar()
+    if layout.backend_repr.is_scalar()
         && let Some(value) = propagatable_scalar(place, state, map)
     {
         return ecx.write_immediate(Immediate::Scalar(value), dest);
diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs
index 79c62372df0..8a646d8cbfe 100644
--- a/compiler/rustc_mir_transform/src/gvn.rs
+++ b/compiler/rustc_mir_transform/src/gvn.rs
@@ -85,6 +85,7 @@ use std::borrow::Cow;
 
 use either::Either;
+use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
     ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable, Scalar,
@@ -103,7 +104,6 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::DUMMY_SP;
 use rustc_span::def_id::DefId;
-use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
 use smallvec::SmallVec;
 use tracing::{debug, instrument, trace};
 
@@ -427,7 +427,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
                 };
                 let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
                 ImmTy::from_immediate(ptr_imm, ty).into()
-            } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+            } else if matches!(
+                ty.backend_repr,
+                BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
+            ) {
                 let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
                 let variant_dest = if let Some(variant) = variant {
                     self.ecx.project_downcast(&dest, variant).discard_err()?
@@ -573,12 +576,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             // limited transmutes: it only works between types with the same layout, and
             // cannot transmute pointers to integers.
             if value.as_mplace_or_imm().is_right() {
-                let can_transmute = match (value.layout.abi, to.abi) {
-                    (Abi::Scalar(s1), Abi::Scalar(s2)) => {
+                let can_transmute = match (value.layout.backend_repr, to.backend_repr) {
+                    (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => {
                         s1.size(&self.ecx) == s2.size(&self.ecx)
                             && !matches!(s1.primitive(), Primitive::Pointer(..))
                     }
-                    (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+                    (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
                         a1.size(&self.ecx) == a2.size(&self.ecx)
                             && b1.size(&self.ecx) == b2.size(&self.ecx)
                             // The alignment of the second component determines its offset, so that also needs to match.
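The `can_transmute` logic above boils down to a per-component size check plus a provenance restriction. A simplified standalone version (toy `ScalarInfo`, alignment condition elided):

    #[derive(Clone, Copy)]
    struct ScalarInfo { size_bytes: u64, is_pointer: bool }

    // Same size on every component, and never transmute a pointer scalar
    // (that would turn provenance into raw bits).
    fn can_transmute(from: &[ScalarInfo], to: &[ScalarInfo]) -> bool {
        from.len() == to.len()
            && from.iter().zip(to).all(|(f, t)| f.size_bytes == t.size_bytes && !f.is_pointer)
    }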
@@ -1241,7 +1244,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         let as_bits = |value| {
             let constant = self.evaluated[value].as_ref()?;
-            if layout.abi.is_scalar() {
+            if layout.backend_repr.is_scalar() {
                 let scalar = self.ecx.read_scalar(constant).discard_err()?;
                 scalar.to_bits(constant.layout.size).discard_err()
             } else {
@@ -1497,12 +1500,12 @@ fn op_to_prop_const<'tcx>(
     // Do not synthesize too large constants. Codegen will just memcpy them, which we'd like to
     // avoid.
-    if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+    if !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
         return None;
     }
 
     // If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
-    if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
+    if let BackendRepr::Scalar(abi::Scalar::Initialized { .. }) = op.layout.backend_repr
         && let Some(scalar) = ecx.read_scalar(op).discard_err()
     {
         if !scalar.try_to_scalar_int().is_ok() {
diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs
index 08923748eb2..0604665642a 100644
--- a/compiler/rustc_mir_transform/src/known_panics_lint.rs
+++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs
@@ -4,6 +4,7 @@
 
 use std::fmt::Debug;
 
+use rustc_abi::{BackendRepr, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
     ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
@@ -19,7 +20,6 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
 use rustc_span::Span;
-use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
 use tracing::{debug, instrument, trace};
 
 use crate::errors::{AssertLint, AssertLintKind};
@@ -557,7 +557,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;
         let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
-        if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+        if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
             // FIXME `Value` should properly support pairs in `Immediate`... but currently
             // it does not.
             let (val, overflow) = val.to_pair(&self.ecx);
@@ -651,9 +651,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let to = self.ecx.layout_of(to).ok()?;
         // `offset` for immediates only supports scalar/scalar-pair ABIs,
         // so bail out if the target is not one.
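The "don't synthesize large constants" guard above reduces to a membership test on the repr; only register-sized reprs are worth materializing, since anything else would be a memcpy at codegen time. A toy rendering:

    enum Repr { Scalar, ScalarPair, Vector, Memory }

    fn worth_propagating(r: &Repr) -> bool {
        matches!(r, Repr::Scalar | Repr::ScalarPair)
    }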
-        match (value.layout.abi, to.abi) {
-            (Abi::Scalar(..), Abi::Scalar(..)) => {}
-            (Abi::ScalarPair(..), Abi::ScalarPair(..)) => {}
+        match (value.layout.backend_repr, to.backend_repr) {
+            (BackendRepr::Scalar(..), BackendRepr::Scalar(..)) => {}
+            (BackendRepr::ScalarPair(..), BackendRepr::ScalarPair(..)) => {}
             _ => return None,
         }
diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs
index 0b6cf82ca8b..7260c12f278 100644
--- a/compiler/rustc_passes/src/layout_test.rs
+++ b/compiler/rustc_passes/src/layout_test.rs
@@ -81,8 +81,12 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
     let meta_items = attr.meta_item_list().unwrap_or_default();
     for meta_item in meta_items {
         match meta_item.name_or_empty() {
+            // FIXME: this never was about ABI and now this dump arg is confusing
             sym::abi => {
-                tcx.dcx().emit_err(LayoutAbi { span, abi: format!("{:?}", ty_layout.abi) });
+                tcx.dcx().emit_err(LayoutAbi {
+                    span,
+                    abi: format!("{:?}", ty_layout.backend_repr),
+                });
             }
 
             sym::align => {
diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
index 410bf0f40f4..af24fd23f50 100644
--- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
+++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs
@@ -56,7 +56,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::LayoutData
 Stable<'tcx> for rustc_abi::TagEncoding {
     }
 }
 
-impl<'tcx> Stable<'tcx> for rustc_abi::Abi {
+impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr {
     type T = ValueAbi;
 
     fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
         match *self {
-            rustc_abi::Abi::Uninhabited => ValueAbi::Uninhabited,
-            rustc_abi::Abi::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
-            rustc_abi::Abi::ScalarPair(first, second) => {
+            rustc_abi::BackendRepr::Uninhabited => ValueAbi::Uninhabited,
+            rustc_abi::BackendRepr::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
+            rustc_abi::BackendRepr::ScalarPair(first, second) => {
                 ValueAbi::ScalarPair(first.stable(tables), second.stable(tables))
             }
-            rustc_abi::Abi::Vector { element, count } => {
+            rustc_abi::BackendRepr::Vector { element, count } => {
                 ValueAbi::Vector { element: element.stable(tables), count }
             }
-            rustc_abi::Abi::Aggregate { sized } => ValueAbi::Aggregate { sized },
+            rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
         }
     }
 }
diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs
index ffec76370d0..d1234c3cc91 100644
--- a/compiler/rustc_target/src/callconv/loongarch.rs
+++ b/compiler/rustc_target/src/callconv/loongarch.rs
@@ -1,5 +1,7 @@
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+    self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
+};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
 
@@ -21,8 +23,8 @@ enum FloatConv {
 struct CannotUseFpConv;
 
 fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -38,8 +40,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -77,8 +79,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -311,7 +313,7 @@ fn classify_arg<'a, Ty, C>(
 }
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs
index 2c3258c8d42..5bdf4c2ad77 100644
--- a/compiler/rustc_target/src/callconv/mips64.rs
+++ b/compiler/rustc_target/src/callconv/mips64.rs
@@ -5,7 +5,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};
 
 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
-    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, signed) = scalar.primitive() {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -24,8 +24,8 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    match ret.layout.field(cx, i).abi {
-        abi::Abi::Scalar(scalar) => match scalar.primitive() {
+    match ret.layout.field(cx, i).backend_repr {
+        abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Float(abi::F32) => Some(Reg::f32()),
             abi::Float(abi::F64) => Some(Reg::f64()),
             _ => None,
@@ -109,7 +109,7 @@ where
             let offset = arg.layout.fields.offset(i);
 
             // We only care about aligned doubles
-            if let abi::Abi::Scalar(scalar) = field.abi {
+            if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
                 if scalar.primitive() == abi::Float(abi::F64) {
                     if offset.is_aligned(dl.f64_align.abi) {
                         // Insert enough integers to cover [last_offset, offset)
diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs
index 25b001b57e8..8c3df9c426b 100644
--- a/compiler/rustc_target/src/callconv/mod.rs
+++ b/compiler/rustc_target/src/callconv/mod.rs
@@ -6,7 +6,8 @@ use rustc_macros::HashStable_Generic;
 use rustc_span::Symbol;
 
 use crate::abi::{
-    self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
+    self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
+    TyAndLayout,
 };
 use crate::spec::abi::Abi as SpecAbi;
 use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
@@ -350,15 +351,17 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         layout: TyAndLayout<'a, Ty>,
         scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
     ) -> Self {
-        let mode = match layout.abi {
-            Abi::Uninhabited => PassMode::Ignore,
-            Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
-            Abi::ScalarPair(a, b) => PassMode::Pair(
+        let mode = match layout.backend_repr {
+            BackendRepr::Uninhabited => PassMode::Ignore,
+            BackendRepr::Scalar(scalar) => {
+                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
+            }
+            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                 scalar_attrs(&layout, a, Size::ZERO),
                 scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
             ),
-            Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
-            Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
+            BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
         };
         ArgAbi { layout, mode }
     }
@@ -460,7 +463,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
 
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let Abi::Scalar(scalar) = self.layout.abi {
+        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
             if let abi::Int(i, signed) = scalar.primitive() {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
@@ -512,7 +515,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         // That elevates any type difference to an ABI difference since we just use the
         // full Rust type as the LLVM argument/return type.
         if matches!(self.mode, PassMode::Direct(..))
-            && matches!(self.layout.abi, Abi::Aggregate { .. })
+            && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
         {
             // For aggregates in `Direct` mode to be compatible, the types need to be equal.
             self.layout.ty == other.layout.ty
@@ -791,8 +794,8 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 continue;
             }
 
-            match arg.layout.abi {
-                Abi::Aggregate { .. } => {}
+            match arg.layout.backend_repr {
+                BackendRepr::Memory { .. } => {}
 
                 // This is a fun case! The gist of what this is doing is
                 // that we want callers and callees to always agree on the
@@ -813,7 +816,9 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 // Note that the intrinsic ABI is exempt here as
                 // that's how we connect up to LLVM and it's unstable
                 // anyway, we control all calls to it in libstd.
-                Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
+                BackendRepr::Vector { .. }
+                    if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect =>
+                {
                     arg.make_indirect();
                     continue;
                 }
diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs
index f96169e6a61..c0298edb5ab 100644
--- a/compiler/rustc_target/src/callconv/riscv.rs
+++ b/compiler/rustc_target/src/callconv/riscv.rs
@@ -4,8 +4,10 @@
 // Reference: Clang RISC-V ELF psABI lowering code
 // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773
 
+use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
 
@@ -27,8 +29,8 @@ enum FloatConv {
 struct CannotUseFpConv;
 
 fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -44,8 +46,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -83,8 +85,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -317,7 +319,7 @@ fn classify_arg<'a, Ty, C>(
 }
 
 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {
diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs
index 835353f76fc..313d8730399 100644
--- a/compiler/rustc_target/src/callconv/sparc64.rs
+++ b/compiler/rustc_target/src/callconv/sparc64.rs
@@ -109,11 +109,11 @@ where
         return data;
     }
 
-    match layout.abi {
-        abi::Abi::Scalar(scalar) => {
+    match layout.backend_repr {
+        abi::BackendRepr::Scalar(scalar) => {
             data = arg_scalar(cx, &scalar, offset, data);
         }
-        abi::Abi::Aggregate { .. } => {
+        abi::BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 if offset < layout.fields.offset(i) {
                     offset = layout.fields.offset(i);
@@ -122,7 +122,7 @@ where
             }
         }
         _ => {
-            if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+            if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
                 data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
             }
         }
diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs
index e907beecb38..a5af975d4d2 100644
--- a/compiler/rustc_target/src/callconv/x86.rs
+++ b/compiler/rustc_target/src/callconv/x86.rs
@@ -1,6 +1,6 @@
 use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
 use crate::abi::{
-    Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+    AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
 };
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
@@ -105,10 +105,12 @@ where
     where
         Ty: TyAbiInterface<'a, C> + Copy,
     {
-        match layout.abi {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false,
-            Abi::Vector { .. } => true,
-            Abi::Aggregate { .. } => {
+        match layout.backend_repr {
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..) => false,
+            BackendRepr::Vector { .. } => true,
+            BackendRepr::Memory { .. } => {
                 for i in 0..layout.fields.count() {
                     if contains_vector(cx, layout.field(cx, i)) {
                         return true;
@@ -223,9 +225,9 @@ where
         // Intrinsics themselves are not actual "real" functions, so there's no need to change their ABIs.
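The "32-bit integers are always sign-extended" rule in the RISC-V and LoongArch hunks above has a concrete arithmetic meaning on a 64-bit target (xlen = 64). A standalone sketch, not the compiler's code:

    // Widen a 32-bit value the way the psABI demands: via the sign bit,
    // even when the Rust-level type is unsigned.
    fn extend_for_xlen64(v: u32) -> i64 {
        (v as i32) as i64
    }

    fn main() {
        assert_eq!(extend_for_xlen64(1), 1);
        assert_eq!(extend_for_xlen64(0xffff_ffff), -1); // sign bit replicated
    }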
         && abi != SpecAbi::RustIntrinsic
     {
-        let has_float = match fn_abi.ret.layout.abi {
-            Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
-            Abi::ScalarPair(s1, s2) => {
+        let has_float = match fn_abi.ret.layout.backend_repr {
+            BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
+            BackendRepr::ScalarPair(s1, s2) => {
                 matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
             }
             _ => false, // anyway not passed via registers on x86
diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs
index 9910e623ac9..bd101b23ea1 100644
--- a/compiler/rustc_target/src/callconv/x86_64.rs
+++ b/compiler/rustc_target/src/callconv/x86_64.rs
@@ -1,8 +1,10 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp
 
+use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
-use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 
 /// Classification of "eightbyte" components.
 // N.B., the order of the variants is from general to specific,
@@ -46,17 +48,17 @@ where
         return Ok(());
     }
 
-    let mut c = match layout.abi {
-        Abi::Uninhabited => return Ok(()),
+    let mut c = match layout.backend_repr {
+        BackendRepr::Uninhabited => return Ok(()),
 
-        Abi::Scalar(scalar) => match scalar.primitive() {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => Class::Int,
             abi::Float(_) => Class::Sse,
         },
 
-        Abi::Vector { .. } => Class::Sse,
+        BackendRepr::Vector { .. } => Class::Sse,
 
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 let field_off = off + layout.fields.offset(i);
                 classify(cx, layout.field(cx, i), cls, field_off)?;
diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs
index e5a20b248e4..83d94cb11ba 100644
--- a/compiler/rustc_target/src/callconv/x86_win64.rs
+++ b/compiler/rustc_target/src/callconv/x86_win64.rs
@@ -1,25 +1,28 @@
+use rustc_abi::{BackendRepr, Float, Primitive};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg};
-use crate::abi::{Abi, Float, Primitive};
 use crate::spec::HasTargetSpec;
 
 // Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing
 
 pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) {
     let fixup = |a: &mut ArgAbi<'_, Ty>| {
-        match a.layout.abi {
-            Abi::Uninhabited | Abi::Aggregate { sized: false } => {}
-            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() {
-                8 => a.cast_to(Reg::i8()),
-                16 => a.cast_to(Reg::i16()),
-                32 => a.cast_to(Reg::i32()),
-                64 => a.cast_to(Reg::i64()),
-                _ => a.make_indirect(),
-            },
-            Abi::Vector { .. } => {
+        match a.layout.backend_repr {
+            BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {}
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
+                match a.layout.size.bits() {
+                    8 => a.cast_to(Reg::i8()),
+                    16 => a.cast_to(Reg::i16()),
+                    32 => a.cast_to(Reg::i32()),
+                    64 => a.cast_to(Reg::i64()),
+                    _ => a.make_indirect(),
+                }
+            }
+            BackendRepr::Vector { .. } => {
                 // FIXME(eddyb) there should be a size cap here
                 // (probably what clang calls "illegal vectors").
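For readers new to the x86_64 psABI code above: the eightbyte classifier maps each scalar primitive to a register class. A toy, self-contained rendering of just that mapping (the aggregate recursion is omitted; types invented here):

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Class { Int, Sse }

    enum Prim { Int, Pointer, Float }
    enum Repr { Scalar(Prim), Vector }

    // Ints and pointers classify as INTEGER; floats and vectors as SSE.
    fn classify(r: &Repr) -> Class {
        match r {
            Repr::Scalar(Prim::Int | Prim::Pointer) => Class::Int,
            Repr::Scalar(Prim::Float) | Repr::Vector => Class::Sse,
        }
    }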
             }
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 // Match what LLVM does for `f128` so that `compiler-builtins` builtins match up
                 // with what LLVM expects.
                 if a.layout.size.bytes() > 8
diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs
index e1728b08a39..9d313d16500 100644
--- a/compiler/rustc_target/src/callconv/xtensa.rs
+++ b/compiler/rustc_target/src/callconv/xtensa.rs
@@ -6,7 +6,7 @@
 //! Section 2.3 from the Xtensa programmers guide.
 
 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface};
+use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
 use crate::spec::HasTargetSpec;
 
 const NUM_ARG_GPRS: u64 = 6;
@@ -114,8 +114,8 @@ where
 }
 
 fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
diff --git a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
index a068f25fe35..3ddf023cf97 100644
--- a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
+++ b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs
@@ -18,7 +18,7 @@ use rustc_middle::ty::{
 };
 use rustc_span::Span;
 use rustc_span::symbol::Symbol;
-use rustc_target::abi::Abi;
+use rustc_target::abi::BackendRepr;
 use smallvec::SmallVec;
 use tracing::{debug, instrument};
 
@@ -523,8 +523,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
     // e.g., `Rc<()>`
     let unit_receiver_ty = receiver_for_self_ty(tcx, receiver_ty, tcx.types.unit, method_def_id);
-    match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.abi) {
-        Ok(Abi::Scalar(..)) => (),
+    match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.backend_repr) {
+        Ok(BackendRepr::Scalar(..)) => (),
         abi => {
             tcx.dcx().span_delayed_bug(
                 tcx.def_span(method_def_id),
@@ -538,8 +538,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
     // e.g., `Rc<dyn Trait>`
     let trait_object_receiver =
         receiver_for_self_ty(tcx, receiver_ty, trait_object_ty, method_def_id);
-    match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.abi) {
-        Ok(Abi::ScalarPair(..)) => (),
+    match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.backend_repr) {
+        Ok(BackendRepr::ScalarPair(..)) => (),
         abi => {
             tcx.dcx().span_delayed_bug(
                 tcx.def_span(method_def_id),
diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs
index 48149a08de8..722ef5f4569 100644
--- a/compiler/rustc_ty_utils/src/abi.rs
+++ b/compiler/rustc_ty_utils/src/abi.rs
@@ -1,7 +1,7 @@
 use std::iter;
 
 use rustc_abi::Primitive::Pointer;
-use rustc_abi::{Abi, PointerKind, Scalar, Size};
+use rustc_abi::{BackendRepr, PointerKind, Scalar, Size};
 use rustc_hir as hir;
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::bug;
@@ -469,7 +469,7 @@ fn fn_abi_sanity_check<'tcx>(
                 // careful. Scalar/ScalarPair is fine, since backends will generally use
                 // `layout.abi` and ignore everything else. We should just reject `Aggregate`
                 // entirely here, but some targets need to be fixed first.
-                if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+                if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
                     // For an unsized type we'd only pass the sized prefix, so there is no universe
                     // in which we ever want to allow this.
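The Win64 `fixup` rewritten further above dispatches purely on bit-size for pairs and sized aggregates. The same decision table as a standalone sketch (hypothetical `Pass` enum, not the compiler's types):

    enum Pass { CastToIntReg(u8), Indirect }

    fn win64_aggregate_pass(size_bits: u64) -> Pass {
        match size_bits {
            8 | 16 | 32 | 64 => Pass::CastToIntReg(size_bits as u8), // fits one GPR
            _ => Pass::Indirect, // everything else is passed by pointer
        }
    }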
                     assert!(
@@ -500,7 +500,7 @@ fn fn_abi_sanity_check<'tcx>(
                 // Similar to `Direct`, we need to make sure that backends use `layout.abi` and
                 // ignore the rest of the layout.
                 assert!(
-                    matches!(arg.layout.abi, Abi::ScalarPair(..)),
+                    matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
                     "PassMode::Pair for type {}",
                     arg.layout.ty
                 );
@@ -658,9 +658,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
     fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
         // This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended,
         // but who knows what breaks if we change this now.
-        if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+        if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
             assert!(
-                arg.layout.abi.is_sized(),
+                arg.layout.backend_repr.is_sized(),
                 "'unadjusted' ABI does not support unsized arguments"
             );
         }
@@ -731,8 +731,8 @@ fn make_thin_self_ptr<'tcx>(
         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
         Ty::new_mut_ptr(tcx, layout.ty)
     } else {
-        match layout.abi {
-            Abi::ScalarPair(..) | Abi::Scalar(..) => (),
+        match layout.backend_repr {
+            BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
             _ => bug!("receiver type has unsupported layout: {:?}", layout),
         }
diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs
index 94b80e2694d..5ca7afe2453 100644
--- a/compiler/rustc_ty_utils/src/layout.rs
+++ b/compiler/rustc_ty_utils/src/layout.rs
@@ -5,8 +5,9 @@ use hir::def_id::DefId;
 use rustc_abi::Integer::{I8, I32};
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    Abi, AbiAndPrefAlign, AddressSpace, Align, FieldsShape, HasDataLayout, LayoutCalculatorError,
-    LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange,
+    AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout,
+    LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
+    Variants, WrappingRange,
 };
 use rustc_index::bit_set::BitSet;
 use rustc_index::{IndexSlice, IndexVec};
@@ -173,7 +174,9 @@ fn layout_of_uncached<'tcx>(
             let mut layout = LayoutData::clone(&layout.0);
             match *pat {
                 ty::PatternKind::Range { start, end, include_end } => {
-                    if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &mut layout.abi {
+                    if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+                        &mut layout.backend_repr
+                    {
                         if let Some(start) = start {
                             scalar.valid_range_mut().start = start
                                 .try_to_bits(tcx, param_env)
@@ -275,7 +278,7 @@ fn layout_of_uncached<'tcx>(
                     return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
                 }
 
-                let Abi::Scalar(metadata) = metadata_layout.abi else {
+                let BackendRepr::Scalar(metadata) = metadata_layout.backend_repr else {
                     return Err(error(cx, LayoutError::Unknown(pointee)));
                 };
@@ -330,9 +333,9 @@ fn layout_of_uncached<'tcx>(
                 .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
 
             let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
-                Abi::Uninhabited
+                BackendRepr::Uninhabited
             } else {
-                Abi::Aggregate { sized: true }
+                BackendRepr::Memory { sized: true }
             };
 
             let largest_niche = if count != 0 { element.largest_niche } else { None };
 
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields: FieldsShape::Array { stride: element.size, count },
-                abi,
+                backend_repr: abi,
                 largest_niche,
                 align: element.align,
                 size,
@@ -353,7 +356,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields: FieldsShape::Array { stride: element.size, count: 0 },
-                abi: Abi::Aggregate { sized: false },
+                backend_repr: BackendRepr::Memory { sized: false },
                 largest_niche: None,
                 align: element.align,
                 size: Size::ZERO,
@@ -364,7 +367,7 @@ fn layout_of_uncached<'tcx>(
         ty::Str => tcx.mk_layout(LayoutData {
             variants: Variants::Single { index: FIRST_VARIANT },
             fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
-            abi: Abi::Aggregate { sized: false },
+            backend_repr: BackendRepr::Memory { sized: false },
             largest_niche: None,
             align: dl.i8_align,
             size: Size::ZERO,
@@ -384,8 +387,8 @@ fn layout_of_uncached<'tcx>(
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?;
-            match unit.abi {
-                Abi::Aggregate { ref mut sized } => *sized = false,
+            match unit.backend_repr {
+                BackendRepr::Memory { ref mut sized } => *sized = false,
                 _ => bug!(),
             }
             tcx.mk_layout(unit)
@@ -500,7 +503,7 @@ fn layout_of_uncached<'tcx>(
             // Compute the ABI of the element type:
             let e_ly = cx.layout_of(e_ty)?;
-            let Abi::Scalar(e_abi) = e_ly.abi else {
+            let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
                 // This error isn't caught in typeck, e.g., if
                 // the element type of the vector is generic.
                 tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty });
@@ -516,12 +519,12 @@ fn layout_of_uncached<'tcx>(
                 // Non-power-of-two vectors have padding up to the next power-of-two.
                 // If we're a packed repr, remove the padding while keeping the alignment as close
                 // to a vector as possible.
-                (Abi::Aggregate { sized: true }, AbiAndPrefAlign {
+                (BackendRepr::Memory { sized: true }, AbiAndPrefAlign {
                     abi: Align::max_for_offset(size),
                     pref: dl.vector_align(size).pref,
                 })
             } else {
-                (Abi::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
+                (BackendRepr::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
             };
             let size = size.align_to(align.abi);
@@ -535,7 +538,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields,
-                abi,
+                backend_repr: abi,
                 largest_niche: e_ly.largest_niche,
                 size,
                 align,
@@ -985,10 +988,12 @@ fn coroutine_layout<'tcx>(
     size = size.align_to(align.abi);
 
-    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
-        Abi::Uninhabited
+    let abi = if prefix.backend_repr.is_uninhabited()
+        || variants.iter().all(|v| v.backend_repr.is_uninhabited())
+    {
+        BackendRepr::Uninhabited
     } else {
-        Abi::Aggregate { sized: true }
+        BackendRepr::Memory { sized: true }
     };
 
     let layout = tcx.mk_layout(LayoutData {
@@ -999,7 +1004,7 @@ fn coroutine_layout<'tcx>(
             variants,
         },
         fields: outer_fields,
-        abi,
+        backend_repr: abi,
        // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
         // self-referentiality), getting the discriminant can cause aliasing violations.
         // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that
diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs
index 3db5a4f1805..f43feb552b2 100644
--- a/compiler/rustc_ty_utils/src/layout/invariant.rs
+++ b/compiler/rustc_ty_utils/src/layout/invariant.rs
@@ -66,12 +66,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
     fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
         // Verify the ABI mandated alignment and size.
- let align = layout.abi.inherent_align(cx).map(|align| align.abi); - let size = layout.abi.inherent_size(cx); + let align = layout.backend_repr.inherent_align(cx).map(|align| align.abi); + let size = layout.backend_repr.inherent_size(cx); let Some((align, size)) = align.zip(size) else { assert_matches!( - layout.layout.abi(), - Abi::Uninhabited | Abi::Aggregate { .. }, + layout.layout.backend_repr(), + BackendRepr::Uninhabited | BackendRepr::Memory { .. }, "ABI unexpectedly missing alignment and/or size in {layout:#?}" ); return; @@ -88,12 +88,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa ); // Verify per-ABI invariants - match layout.layout.abi() { - Abi::Scalar(_) => { + match layout.layout.backend_repr() { + BackendRepr::Scalar(_) => { // Check that this matches the underlying field. let inner = skip_newtypes(cx, layout); assert!( - matches!(inner.layout.abi(), Abi::Scalar(_)), + matches!(inner.layout.backend_repr(), BackendRepr::Scalar(_)), "`Scalar` type {} is newtype around non-`Scalar` type {}", layout.ty, inner.ty @@ -132,7 +132,7 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa "`Scalar` field with bad align in {inner:#?}", ); assert!( - matches!(field.abi, Abi::Scalar(_)), + matches!(field.backend_repr, BackendRepr::Scalar(_)), "`Scalar` field with bad ABI in {inner:#?}", ); } @@ -141,11 +141,11 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa } } } - Abi::ScalarPair(scalar1, scalar2) => { + BackendRepr::ScalarPair(scalar1, scalar2) => { // Check that the underlying pair of fields matches. let inner = skip_newtypes(cx, layout); assert!( - matches!(inner.layout.abi(), Abi::ScalarPair(..)), + matches!(inner.layout.backend_repr(), BackendRepr::ScalarPair(..)), "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}", layout.ty, inner.ty @@ -208,8 +208,8 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa "`ScalarPair` first field with bad align in {inner:#?}", ); assert_matches!( - field1.abi, - Abi::Scalar(_), + field1.backend_repr, + BackendRepr::Scalar(_), "`ScalarPair` first field with bad ABI in {inner:#?}", ); let field2_offset = size1.align_to(align2); @@ -226,16 +226,16 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa "`ScalarPair` second field with bad align in {inner:#?}", ); assert_matches!( - field2.abi, - Abi::Scalar(_), + field2.backend_repr, + BackendRepr::Scalar(_), "`ScalarPair` second field with bad ABI in {inner:#?}", ); } - Abi::Vector { element, .. } => { + BackendRepr::Vector { element, .. } => { assert!(align >= element.align(cx).abi); // just sanity-checking `vector_align`. // FIXME: Do some kind of check of the inner type, like for Scalar and ScalarPair. } - Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check. + BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {} // Nothing to check. } } @@ -274,13 +274,13 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa // The top-level ABI and the ABI of the variants should be coherent. 
let scalar_coherent = |s1: Scalar, s2: Scalar| s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx); - let abi_coherent = match (layout.abi, variant.abi) { - (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2), - (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => { + let abi_coherent = match (layout.backend_repr, variant.backend_repr) { + (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => scalar_coherent(s1, s2), + (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => { scalar_coherent(a1, a2) && scalar_coherent(b1, b2) } - (Abi::Uninhabited, _) => true, - (Abi::Aggregate { .. }, _) => true, + (BackendRepr::Uninhabited, _) => true, + (BackendRepr::Memory { .. }, _) => true, _ => false, }; if !abi_coherent { -- cgit 1.4.1-3-g733a5 From 65ff2a6ad71de45c848a622d86b6c74092e1b734 Mon Sep 17 00:00:00 2001 From: Zalathar Date: Wed, 30 Oct 2024 10:02:46 +1100 Subject: Consistently use safe wrapper function `set_section` --- compiler/rustc_codegen_llvm/src/back/lto.rs | 3 ++- compiler/rustc_codegen_llvm/src/back/write.rs | 16 +++++++--------- compiler/rustc_codegen_llvm/src/base.rs | 6 ++---- compiler/rustc_codegen_llvm/src/context.rs | 2 +- compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs | 2 +- compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 1 + compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp | 6 ++++-- 7 files changed, 18 insertions(+), 18 deletions(-) (limited to 'compiler/rustc_codegen_llvm/src') diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 02149e176ea..48beb9be2b2 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -165,13 +165,14 @@ fn get_bitcode_slice_from_object_data<'a>( // We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment // name" which in the public API for sections gets treated as part of the section name, but // internally in MachOObjectFile.cpp gets treated separately. 
-    let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
+    let section_name = bitcode_section_name(cgcx).to_str().unwrap().trim_start_matches("__LLVM,");
     let mut len = 0;
     let data = unsafe {
         llvm::LLVMRustGetSliceFromObjectDataByName(
             obj.as_ptr(),
             obj.len(),
             section_name.as_ptr(),
+            section_name.len(),
             &mut len,
         )
     };
diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs
index e68ba96f14f..bfa9e8b82a0 100644
--- a/compiler/rustc_codegen_llvm/src/back/write.rs
+++ b/compiler/rustc_codegen_llvm/src/back/write.rs
@@ -1,4 +1,4 @@
-use std::ffi::CString;
+use std::ffi::{CStr, CString};
 use std::io::{self, Write};
 use std::path::{Path, PathBuf};
 use std::sync::Arc;
@@ -958,14 +958,13 @@ fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
     cgcx.opts.target_triple.triple().contains("-aix")
 }
 
-//FIXME use c string literals here too
-pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static str {
+pub(crate) fn bitcode_section_name(cgcx: &CodegenContext<LlvmCodegenBackend>) -> &'static CStr {
     if target_is_apple(cgcx) {
-        "__LLVM,__bitcode\0"
+        c"__LLVM,__bitcode"
     } else if target_is_aix(cgcx) {
-        ".ipa\0"
+        c".ipa"
     } else {
-        ".llvmbc\0"
+        c".llvmbc"
     }
 }
 
@@ -1042,8 +1041,7 @@ unsafe fn embed_bitcode(
         );
         llvm::LLVMSetInitializer(llglobal, llconst);
 
-        let section = bitcode_section_name(cgcx);
-        llvm::LLVMSetSection(llglobal, section.as_c_char_ptr());
+        llvm::set_section(llglobal, bitcode_section_name(cgcx));
         llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
         llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
 
@@ -1061,7 +1059,7 @@ unsafe fn embed_bitcode(
         } else {
             c".llvmcmd"
         };
-        llvm::LLVMSetSection(llglobal, section.as_ptr());
+        llvm::set_section(llglobal, section);
         llvm::set_linkage(llglobal, llvm::Linkage::PrivateLinkage);
     } else {
         // We need custom section flags, so emit module-level inline assembly.
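The return-type change from `&'static str` (with a manually appended `\0`) to
`&'static CStr` is what makes the new `llvm::set_section` wrapper safe to call:
C-string literals (`c"..."`, stable since Rust 1.77) are guaranteed to be
NUL-terminated and free of interior NULs at compile time. A minimal sketch of
the wrapper pattern, with a hypothetical `ffi_set_section` standing in for the
real `LLVMSetSection` binding:

    use std::ffi::{CStr, c_char};

    // Stand-in for the raw FFI binding (illustrative only); it trusts
    // `name` to point at a NUL-terminated string.
    unsafe fn ffi_set_section(name: *const c_char) {
        let _ = name; // a real binding would call into LLVM here
    }

    // Safe wrapper: `&CStr` guarantees NUL termination at the type level,
    // confining `unsafe` to a single audited call site.
    fn set_section(name: &CStr) {
        unsafe { ffi_set_section(name.as_ptr()) }
    }

    fn main() {
        set_section(c".llvmbc"); // `c"..."` literals are `&'static CStr`
    }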
diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs
index 0ba8d82406a..32793894794 100644
--- a/compiler/rustc_codegen_llvm/src/base.rs
+++ b/compiler/rustc_codegen_llvm/src/base.rs
@@ -145,10 +145,8 @@ pub(crate) fn compile_codegen_unit(
 
 pub(crate) fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
     let Some(sect) = attrs.link_section else { return };
-    unsafe {
-        let buf = SmallCStr::new(sect.as_str());
-        llvm::LLVMSetSection(llval, buf.as_ptr());
-    }
+    let buf = SmallCStr::new(sect.as_str());
+    llvm::set_section(llval, &buf);
 }
 
 pub(crate) fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index f33da42d63e..9778ff4918c 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -565,7 +565,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
             llvm::LLVMSetInitializer(g, array);
             llvm::set_linkage(g, llvm::Linkage::AppendingLinkage);
-            llvm::LLVMSetSection(g, c"llvm.metadata".as_ptr());
+            llvm::set_section(g, c"llvm.metadata");
         }
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index 7947c9c8c8e..aef8642f199 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -72,7 +72,7 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>(
         let section_var = cx
             .define_global(section_var_name, llvm_type)
             .unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
-        llvm::LLVMSetSection(section_var, c".debug_gdb_scripts".as_ptr());
+        llvm::set_section(section_var, c".debug_gdb_scripts");
         llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
         llvm::LLVMSetGlobalConstant(section_var, llvm::True);
         llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 888d41d4726..c0ca7e51025 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2416,6 +2416,7 @@ unsafe extern "C" {
         data: *const u8,
         len: usize,
         name: *const u8,
+        name_len: usize,
         out_len: &mut usize,
     ) -> *const u8;
 
diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
index 20bf32d731e..3e906f89c15 100644
--- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp
@@ -1559,8 +1559,10 @@ extern "C" LLVMModuleRef LLVMRustParseBitcodeForLTO(LLVMContextRef Context,
 extern "C" const char *LLVMRustGetSliceFromObjectDataByName(const char *data,
                                                             size_t len,
                                                             const char *name,
+                                                            size_t name_len,
                                                             size_t *out_len) {
   *out_len = 0;
+  auto Name = StringRef(name, name_len);
   auto Data = StringRef(data, len);
   auto Buffer = MemoryBufferRef(Data, ""); // The id is unused.
  file_magic Type = identify_magic(Buffer.getBuffer());
@@ -1571,8 +1573,8 @@ extern "C" const char *LLVMRustGetSliceFromObjectDataByName(const char *data,
     return nullptr;
   }
   for (const object::SectionRef &Sec : (*ObjFileOrError)->sections()) {
-    Expected<StringRef> Name = Sec.getName();
-    if (Name && *Name == name) {
+    Expected<StringRef> SecName = Sec.getName();
+    if (SecName && *SecName == Name) {
       Expected<StringRef> SectionOrError = Sec.getContents();
       if (!SectionOrError) {
         LLVMRustSetLastError(toString(SectionOrError.takeError()).c_str());
--
cgit 1.4.1-3-g733a5


From c3071590ab70f016380fe16030057e84b76e59eb Mon Sep 17 00:00:00 2001
From: Zalathar
Date: Mon, 28 Oct 2024 17:25:40 +1100
Subject: Clean up FFI calls for operand bundles
---
 compiler/rustc_codegen_llvm/src/allocator.rs     |  3 +-
 compiler/rustc_codegen_llvm/src/builder.rs       | 23 +++---
 compiler/rustc_codegen_llvm/src/common.rs        |  8 +--
 compiler/rustc_codegen_llvm/src/llvm/ffi.rs      | 91 ++++++++++++------------
 compiler/rustc_codegen_llvm/src/llvm/mod.rs      | 38 +++++++---
 compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp | 78 ++++----------------
 6 files changed, 103 insertions(+), 138 deletions(-)

(limited to 'compiler/rustc_codegen_llvm/src')

diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs
index a2e6c7eb856..149ded28356 100644
--- a/compiler/rustc_codegen_llvm/src/allocator.rs
+++ b/compiler/rustc_codegen_llvm/src/allocator.rs
@@ -154,7 +154,7 @@ fn create_wrapper_function(
             .enumerate()
            .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
            .collect::<Vec<_>>();
-        let ret = llvm::LLVMRustBuildCall(
+        let ret = llvm::LLVMBuildCallWithOperandBundles(
            llbuilder,
            ty,
            callee,
@@ -162,6 +162,7 @@ fn create_wrapper_function(
            args.len() as c_uint,
            [].as_ptr(),
            0 as c_uint,
+           c"".as_ptr(),
        );
        llvm::LLVMSetTailCall(ret, True);
        if output.is_some() {
diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 8702532c36e..f8eb3e0c982 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -239,7 +239,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("invoke", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
-        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
         let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
         if let Some(funclet_bundle) = funclet_bundle {
             bundles.push(funclet_bundle);
         }
@@ -250,13 +249,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        if let Some(kcfi_bundle) = kcfi_bundle {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
             bundles.push(kcfi_bundle);
         }
 
         let invoke = unsafe {
-            llvm::LLVMRustBuildInvoke(
+            llvm::LLVMBuildInvokeWithOperandBundles(
                 self.llbuilder,
                 llty,
                 llfn,
@@ -1179,7 +1177,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let args = self.check_call("call", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
-        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
         let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
         if let Some(funclet_bundle) = funclet_bundle {
             bundles.push(funclet_bundle);
         }
@@ -1190,13 +1187,12 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         // Emit KCFI operand bundle
         let kcfi_bundle =
            self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        if let Some(kcfi_bundle) = kcfi_bundle {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
             bundles.push(kcfi_bundle);
         }
 
         let call = unsafe {
-            llvm::LLVMRustBuildCall(
+            llvm::LLVMBuildCallWithOperandBundles(
                 self.llbuilder,
                 llty,
                 llfn,
@@ -1204,6 +1200,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 args.len() as c_uint,
                 bundles.as_ptr(),
                 bundles.len() as c_uint,
+                c"".as_ptr(),
             )
         };
         if let Some(fn_abi) = fn_abi {
@@ -1509,7 +1506,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         let args = self.check_call("callbr", llty, llfn, args);
         let funclet_bundle = funclet.map(|funclet| funclet.bundle());
-        let funclet_bundle = funclet_bundle.as_ref().map(|b| &*b.raw);
         let mut bundles: SmallVec<[_; 2]> = SmallVec::new();
         if let Some(funclet_bundle) = funclet_bundle {
             bundles.push(funclet_bundle);
         }
@@ -1520,13 +1516,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         // Emit KCFI operand bundle
         let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn);
-        let kcfi_bundle = kcfi_bundle.as_ref().map(|b| &*b.raw);
-        if let Some(kcfi_bundle) = kcfi_bundle {
+        if let Some(kcfi_bundle) = kcfi_bundle.as_deref() {
             bundles.push(kcfi_bundle);
         }
 
         let callbr = unsafe {
-            llvm::LLVMRustBuildCallBr(
+            llvm::LLVMBuildCallBr(
                 self.llbuilder,
                 llty,
                 llfn,
@@ -1601,7 +1596,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         fn_abi: Option<&FnAbi<'tcx, Ty<'tcx>>>,
         instance: Option<Instance<'tcx>>,
         llfn: &'ll Value,
-    ) -> Option<llvm::OperandBundleDef<'ll>> {
+    ) -> Option<llvm::OperandBundleOwned<'ll>> {
         let is_indirect_call = unsafe { llvm::LLVMRustIsNonGVFunctionPointerTy(llfn) };
         let kcfi_bundle = if self.tcx.sess.is_sanitizer_kcfi_enabled()
             && let Some(fn_abi) = fn_abi
@@ -1627,7 +1622,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
                 kcfi::typeid_for_fnabi(self.tcx, fn_abi, options)
             };
 
-            Some(llvm::OperandBundleDef::new("kcfi", &[self.const_u32(kcfi_typeid)]))
+            Some(llvm::OperandBundleOwned::new("kcfi", &[self.const_u32(kcfi_typeid)]))
         } else {
             None
         };
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 29adc616ee2..8852dec7d9f 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -17,7 +17,7 @@ use tracing::debug;
 
 use crate::consts::const_alloc_to_llvm;
 pub(crate) use crate::context::CodegenCx;
-use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, OperandBundleDef, True};
+use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, True};
 use crate::type_::Type;
 use crate::value::Value;
 
@@ -63,19 +63,19 @@ use crate::value::Value;
 /// the `OperandBundleDef` value created for MSVC landing pads.
 pub(crate) struct Funclet<'ll> {
     cleanuppad: &'ll Value,
-    operand: OperandBundleDef<'ll>,
+    operand: llvm::OperandBundleOwned<'ll>,
 }
 
 impl<'ll> Funclet<'ll> {
     pub(crate) fn new(cleanuppad: &'ll Value) -> Self {
-        Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
+        Funclet { cleanuppad, operand: llvm::OperandBundleOwned::new("funclet", &[cleanuppad]) }
     }
 
     pub(crate) fn cleanuppad(&self) -> &'ll Value {
         self.cleanuppad
     }
 
-    pub(crate) fn bundle(&self) -> &OperandBundleDef<'ll> {
+    pub(crate) fn bundle(&self) -> &llvm::OperandBundle<'ll> {
         &self.operand
     }
 }
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 888d41d4726..d67f76b51a2 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -3,6 +3,7 @@
 
 use std::fmt::Debug;
 use std::marker::PhantomData;
+use std::ptr;
 
 use libc::{c_char, c_int, c_uint, c_ulonglong, c_void, size_t};
 use rustc_macros::TryFromU32;
@@ -708,8 +709,9 @@ unsafe extern "C" {
 }
 
 #[repr(C)]
 pub struct RustArchiveMember<'a>(InvariantOpaque<'a>);
+/// Opaque pointee of `LLVMOperandBundleRef`.
 #[repr(C)]
-pub struct OperandBundleDef<'a>(InvariantOpaque<'a>);
+pub(crate) struct OperandBundle<'a>(InvariantOpaque<'a>);
 #[repr(C)]
 pub struct Linker<'a>(InvariantOpaque<'a>);
 
@@ -1538,6 +1540,50 @@ unsafe extern "C" {
 
     pub fn LLVMGetOrInsertComdat(M: &Module, Name: *const c_char) -> &Comdat;
     pub fn LLVMSetComdat(V: &Value, C: &Comdat);
+
+    pub(crate) fn LLVMCreateOperandBundle(
+        Tag: *const c_char,
+        TagLen: size_t,
+        Args: *const &'_ Value,
+        NumArgs: c_uint,
+    ) -> *mut OperandBundle<'_>;
+    pub(crate) fn LLVMDisposeOperandBundle(Bundle: ptr::NonNull<OperandBundle<'_>>);
+
+    pub(crate) fn LLVMBuildCallWithOperandBundles<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Bundles: *const &OperandBundle<'a>,
+        NumBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub(crate) fn LLVMBuildInvokeWithOperandBundles<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Then: &'a BasicBlock,
+        Catch: &'a BasicBlock,
+        Bundles: *const &OperandBundle<'a>,
+        NumBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
+    pub(crate) fn LLVMBuildCallBr<'a>(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        Fn: &'a Value,
+        DefaultDest: &'a BasicBlock,
+        IndirectDests: *const &'a BasicBlock,
+        NumIndirectDests: c_uint,
+        Args: *const &'a Value,
+        NumArgs: c_uint,
+        Bundles: *const &OperandBundle<'a>,
+        NumBundles: c_uint,
+        Name: *const c_char,
+    ) -> &'a Value;
 }
 
 #[link(name = "llvm-wrapper", kind = "static")]
@@ -1623,47 +1669,11 @@ unsafe extern "C" {
         AttrsLen: size_t,
     );
 
-    pub fn LLVMRustBuildInvoke<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Fn: &'a Value,
-        Args: *const &'a Value,
-        NumArgs: c_uint,
-        Then: &'a BasicBlock,
-        Catch: &'a BasicBlock,
-        OpBundles: *const &OperandBundleDef<'a>,
-        NumOpBundles: c_uint,
-        Name: *const c_char,
-    ) -> &'a Value;
-
-    pub fn LLVMRustBuildCallBr<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Fn: &'a Value,
-        DefaultDest: &'a BasicBlock,
-        IndirectDests: *const &'a BasicBlock,
-        NumIndirectDests: c_uint,
-        Args: *const &'a Value,
-        NumArgs: c_uint,
-        OpBundles: *const &OperandBundleDef<'a>,
-        NumOpBundles: c_uint,
-        Name: *const c_char,
-    ) -> &'a Value;
-
     pub fn LLVMRustSetFastMath(Instr: &Value);
     pub fn LLVMRustSetAlgebraicMath(Instr: &Value);
     pub fn LLVMRustSetAllowReassoc(Instr: &Value);
 
     // Miscellaneous instructions
-    pub fn LLVMRustBuildCall<'a>(
-        B: &Builder<'a>,
-        Ty: &'a Type,
-        Fn: &'a Value,
-        Args: *const &'a Value,
-        NumArgs: c_uint,
-        OpBundles: *const &OperandBundleDef<'a>,
-        NumOpBundles: c_uint,
-    ) -> &'a Value;
     pub fn LLVMRustBuildMemCpy<'a>(
         B: &Builder<'a>,
         Dst: &'a Value,
@@ -2357,13 +2367,6 @@ unsafe extern "C" {
 
     pub fn LLVMRustSetDataLayoutFromTargetMachine<'a>(M: &'a Module, TM: &'a TargetMachine);
 
-    pub fn LLVMRustBuildOperandBundleDef(
-        Name: *const c_char,
-        Inputs: *const &'_ Value,
-        NumInputs: c_uint,
-    ) -> &mut OperandBundleDef<'_>;
-    pub fn LLVMRustFreeOperandBundleDef<'a>(Bundle: &'a mut OperandBundleDef<'a>);
-
     pub fn LLVMRustPositionBuilderAtStart<'a>(B: &Builder<'a>, BB: &'a BasicBlock);
 
     pub fn LLVMRustSetModulePICLevel(M: &Module);
diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
index acd425bbb8e..00a5cd3b859 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs
@@ -2,11 +2,12 @@
 
 use std::cell::RefCell;
 use std::ffi::{CStr, CString};
+use std::ops::Deref;
+use std::ptr;
 use std::str::FromStr;
 use std::string::FromUtf8Error;
 
 use libc::c_uint;
-use rustc_data_structures::small_c_str::SmallCStr;
 use rustc_llvm::RustString;
 use rustc_target::abi::{Align, Size, WrappingRange};
 
@@ -331,28 +332,43 @@ pub fn last_error() -> Option<String> {
     }
 }
 
-pub struct OperandBundleDef<'a> {
-    pub raw: &'a mut ffi::OperandBundleDef<'a>,
+/// Owns an [`OperandBundle`], and will dispose of it when dropped.
+pub(crate) struct OperandBundleOwned<'a> {
+    raw: ptr::NonNull<OperandBundle<'a>>,
 }
 
-impl<'a> OperandBundleDef<'a> {
-    pub fn new(name: &str, vals: &[&'a Value]) -> Self {
-        let name = SmallCStr::new(name);
-        let def = unsafe {
-            LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
+impl<'a> OperandBundleOwned<'a> {
+    pub(crate) fn new(name: &str, vals: &[&'a Value]) -> Self {
+        let raw = unsafe {
+            LLVMCreateOperandBundle(
+                name.as_c_char_ptr(),
+                name.len(),
+                vals.as_ptr(),
+                vals.len() as c_uint,
+            )
         };
-        OperandBundleDef { raw: def }
+        OperandBundleOwned { raw: ptr::NonNull::new(raw).unwrap() }
     }
 }
 
-impl Drop for OperandBundleDef<'_> {
+impl Drop for OperandBundleOwned<'_> {
     fn drop(&mut self) {
         unsafe {
-            LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
+            LLVMDisposeOperandBundle(self.raw);
         }
     }
 }
 
+impl<'a> Deref for OperandBundleOwned<'a> {
+    type Target = OperandBundle<'a>;
+
+    fn deref(&self) -> &Self::Target {
+        // SAFETY: The returned reference is opaque and can only used for FFI.
+        // It is valid for as long as `&self` is.
+        unsafe { self.raw.as_ref() }
+    }
+}
+
 pub(crate) fn add_module_flag_u32(
     module: &Module,
     merge_behavior: ModuleFlagMergeBehavior,
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index b3dab6d512e..645b4082be5 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -1537,38 +1537,6 @@ LLVMRustUnpackSMDiagnostic(LLVMSMDiagnosticRef DRef, RustStringRef MessageOut,
   return true;
 }
 
-extern "C" OperandBundleDef *LLVMRustBuildOperandBundleDef(const char *Name,
-                                                           LLVMValueRef *Inputs,
-                                                           unsigned NumInputs) {
-  return new OperandBundleDef(Name,
-                              ArrayRef<Value *>(unwrap(Inputs), NumInputs));
-}
-
-extern "C" void LLVMRustFreeOperandBundleDef(OperandBundleDef *Bundle) {
-  delete Bundle;
-}
-
-// OpBundlesIndirect is an array of pointers (*not* a pointer to an array).
-extern "C" LLVMValueRef LLVMRustBuildCall(LLVMBuilderRef B, LLVMTypeRef Ty,
-                                          LLVMValueRef Fn, LLVMValueRef *Args,
-                                          unsigned NumArgs,
-                                          OperandBundleDef **OpBundlesIndirect,
-                                          unsigned NumOpBundles) {
-  Value *Callee = unwrap(Fn);
-  FunctionType *FTy = unwrap(Ty);
-
-  // FIXME: Is there a way around this?
-  SmallVector<OperandBundleDef, 2> OpBundles;
-  OpBundles.reserve(NumOpBundles);
-  for (unsigned i = 0; i < NumOpBundles; ++i) {
-    OpBundles.push_back(*OpBundlesIndirect[i]);
-  }
-
-  return wrap(unwrap(B)->CreateCall(FTy, Callee,
-                                    ArrayRef<Value *>(unwrap(Args), NumArgs),
-                                    ArrayRef<OperandBundleDef>(OpBundles)));
-}
-
 extern "C" LLVMValueRef LLVMRustBuildMemCpy(LLVMBuilderRef B, LLVMValueRef Dst,
                                             unsigned DstAlign, LLVMValueRef Src,
                                             unsigned SrcAlign,
@@ -1596,37 +1564,18 @@ extern "C" LLVMValueRef LLVMRustBuildMemSet(LLVMBuilderRef B, LLVMValueRef Dst,
                                             MaybeAlign(DstAlign), IsVolatile));
 }
 
-// OpBundlesIndirect is an array of pointers (*not* a pointer to an array).
-extern "C" LLVMValueRef
-LLVMRustBuildInvoke(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
-                    LLVMValueRef *Args, unsigned NumArgs,
-                    LLVMBasicBlockRef Then, LLVMBasicBlockRef Catch,
-                    OperandBundleDef **OpBundlesIndirect, unsigned NumOpBundles,
-                    const char *Name) {
-  Value *Callee = unwrap(Fn);
-  FunctionType *FTy = unwrap(Ty);
-
-  // FIXME: Is there a way around this?
-  SmallVector<OperandBundleDef, 2> OpBundles;
-  OpBundles.reserve(NumOpBundles);
-  for (unsigned i = 0; i < NumOpBundles; ++i) {
-    OpBundles.push_back(*OpBundlesIndirect[i]);
-  }
-
-  return wrap(unwrap(B)->CreateInvoke(FTy, Callee, unwrap(Then), unwrap(Catch),
-                                      ArrayRef<Value *>(unwrap(Args), NumArgs),
-                                      ArrayRef<OperandBundleDef>(OpBundles),
-                                      Name));
-}
+// Polyfill for `LLVMBuildCallBr`, which was added in LLVM 19.
+//
+// FIXME: Remove when Rust's minimum supported LLVM version reaches 19.
+#if LLVM_VERSION_LT(19, 0)
+DEFINE_SIMPLE_CONVERSION_FUNCTIONS(OperandBundleDef, LLVMOperandBundleRef)
 
-// OpBundlesIndirect is an array of pointers (*not* a pointer to an array).
 extern "C" LLVMValueRef
-LLVMRustBuildCallBr(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
-                    LLVMBasicBlockRef DefaultDest,
-                    LLVMBasicBlockRef *IndirectDests, unsigned NumIndirectDests,
-                    LLVMValueRef *Args, unsigned NumArgs,
-                    OperandBundleDef **OpBundlesIndirect, unsigned NumOpBundles,
-                    const char *Name) {
+LLVMBuildCallBr(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
+                LLVMBasicBlockRef DefaultDest, LLVMBasicBlockRef *IndirectDests,
+                unsigned NumIndirectDests, LLVMValueRef *Args, unsigned NumArgs,
+                LLVMOperandBundleRef *Bundles, unsigned NumBundles,
+                const char *Name) {
   Value *Callee = unwrap(Fn);
   FunctionType *FTy = unwrap(Ty);
 
@@ -1639,9 +1588,9 @@ LLVMRustBuildCallBr(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
 
   // FIXME: Is there a way around this?
   SmallVector<OperandBundleDef, 2> OpBundles;
-  OpBundles.reserve(NumOpBundles);
-  for (unsigned i = 0; i < NumOpBundles; ++i) {
-    OpBundles.push_back(*OpBundlesIndirect[i]);
+  OpBundles.reserve(NumBundles);
+  for (unsigned i = 0; i < NumBundles; ++i) {
+    OpBundles.push_back(*unwrap(Bundles[i]));
   }
 
   return wrap(
@@ -1650,6 +1599,7 @@ LLVMRustBuildCallBr(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Fn,
       ArrayRef<Value *>(unwrap(Args), NumArgs), ArrayRef<OperandBundleDef>(OpBundles), Name));
 }
+#endif
 
 extern "C" void LLVMRustPositionBuilderAtStart(LLVMBuilderRef B,
                                                LLVMBasicBlockRef BB) {
--
cgit 1.4.1-3-g733a5


From 8dddd1ae603756ef4957320880115ecc65d706a5 Mon Sep 17 00:00:00 2001
From: Zalathar
Date: Thu, 31 Oct 2024 21:12:15 +1100
Subject: coverage: Avoid ICE when `coverage_cx` is unexpectedly unavailable
---
 compiler/rustc_codegen_llvm/src/context.rs             | 1 +
 compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs | 6 +++++-
 compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs    | 7 ++++++-
 3 files changed, 12 insertions(+), 2 deletions(-)

(limited to 'compiler/rustc_codegen_llvm/src')

diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 9778ff4918c..313957dda96 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -554,6 +554,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
 
     /// Extra state that is only available when coverage instrumentation is enabled.
     #[inline]
+    #[track_caller]
     pub(crate) fn coverage_cx(&self) -> &coverageinfo::CrateCoverageContext<'ll, 'tcx> {
         self.coverage_cx.as_ref().expect("only called when coverage instrumentation is enabled")
     }
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
index 8edd788ee36..f6378199fe2 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen.rs
@@ -54,7 +54,11 @@ pub(crate) fn finalize(cx: &CodegenCx<'_, '_>) {
         add_unused_functions(cx);
     }
 
-    let function_coverage_map = cx.coverage_cx().take_function_coverage_map();
+    // FIXME(#132395): Can this be none even when coverage is enabled?
+    let function_coverage_map = match cx.coverage_cx {
+        Some(ref cx) => cx.take_function_coverage_map(),
+        None => return,
+    };
     if function_coverage_map.is_empty() {
         // This module has no functions with coverage instrumentation
         return;
diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
index a298ed86276..e4ff50816b9 100644
--- a/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
+++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mod.rs
@@ -152,7 +152,12 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
             return;
         };
 
-        let mut coverage_map = bx.coverage_cx().function_coverage_map.borrow_mut();
+        // FIXME(#132395): Unwrapping `coverage_cx` here has led to ICEs in the
+        // wild, so keep this early-return until we understand why.
+        let mut coverage_map = match bx.coverage_cx {
+            Some(ref cx) => cx.function_coverage_map.borrow_mut(),
+            None => return,
+        };
         let func_coverage = coverage_map
             .entry(instance)
             .or_insert_with(|| FunctionCoverageCollector::new(instance, function_coverage_info));
--
cgit 1.4.1-3-g733a5


From 9caced7badc337ced7ad89eb614621c39bd996e9 Mon Sep 17 00:00:00 2001
From: Matthew Maurer
Date: Thu, 31 Oct 2024 20:35:24 +0000
Subject: llvm: Match new LLVM 128-bit integer alignment on sparc

LLVM continues to align more 128-bit integers to 128-bits in the data
layout rather than relying on the high level language to do it. Update
SPARC target files to match and add a backcompat replacement for
current LLVMs.

See llvm/llvm-project#106951 for details
---
 compiler/rustc_codegen_llvm/src/context.rs                          | 5 +++++
 compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs | 2 +-
 compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs    | 2 +-
 compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs   | 2 +-
 compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs   | 2 +-
 compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs    | 2 +-
 compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs       | 2 +-
 7 files changed, 11 insertions(+), 6 deletions(-)

(limited to 'compiler/rustc_codegen_llvm/src')

diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs
index 313957dda96..13428b109d9 100644
--- a/compiler/rustc_codegen_llvm/src/context.rs
+++ b/compiler/rustc_codegen_llvm/src/context.rs
@@ -148,6 +148,11 @@ pub(crate) unsafe fn create_module<'ll>(
             target_data_layout = target_data_layout.replace("-p270:32:32-p271:32:32-p272:64:64", "");
         }
+        if sess.target.arch.starts_with("sparc") {
+            // LLVM 20 updates the sparc layout to correctly align 128 bit integers to 128 bit.
+            // See https://github.com/llvm/llvm-project/pull/106951
+            target_data_layout = target_data_layout.replace("-i128:128", "");
+        }
     }
 
     // Ensure the data-layout values hardcoded remain the defaults.
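The hardcoded `data_layout` strings in the target specs below must stay
byte-for-byte in sync with what the linked LLVM computes, so the specs gain
`i128:128` for LLVM 20 while the code above strips that component back out
for older LLVMs. A self-contained sketch of that rewrite (the `llvm_major`
parameter is illustrative; the real code consults the version of the LLVM
that rustc was built against):

    // Drop the i128 alignment component on SPARC when the linked LLVM
    // predates the layout change from llvm/llvm-project#106951.
    fn adjust_sparc_data_layout(layout: &str, llvm_major: u32) -> String {
        if llvm_major < 20 { layout.replace("-i128:128", "") } else { layout.to_string() }
    }

    fn main() {
        let spec = "E-m:e-i64:64-i128:128-n32:64-S128";
        assert_eq!(adjust_sparc_data_layout(spec, 19), "E-m:e-i64:64-n32:64-S128");
        assert_eq!(adjust_sparc_data_layout(spec, 20), spec);
    }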
diff --git a/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs
index 7d089aeb787..ac2141f9b08 100644
--- a/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc64_unknown_linux_gnu.rs
@@ -16,7 +16,7 @@ pub(crate) fn target() -> Target {
             std: Some(true),
         },
         pointer_width: 64,
-        data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+        data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
         arch: "sparc64".into(),
         options: base,
     }
diff --git a/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs b/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs
index 21f09d6428e..d16b3776cfb 100644
--- a/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc64_unknown_netbsd.rs
@@ -16,7 +16,7 @@ pub(crate) fn target() -> Target {
             std: Some(true),
         },
         pointer_width: 64,
-        data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+        data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
         arch: "sparc64".into(),
         options: TargetOptions { endian: Endian::Big, mcount: "__mcount".into(), ..base },
     }
diff --git a/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs b/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs
index 12626dce477..91e64061020 100644
--- a/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc64_unknown_openbsd.rs
@@ -17,7 +17,7 @@ pub(crate) fn target() -> Target {
             std: Some(true),
         },
         pointer_width: 64,
-        data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+        data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
         arch: "sparc64".into(),
         options: base,
     }
diff --git a/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs b/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs
index 08f0bb302dd..2777395757f 100644
--- a/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc_unknown_linux_gnu.rs
@@ -11,7 +11,7 @@ pub(crate) fn target() -> Target {
             std: Some(true),
         },
         pointer_width: 32,
-        data_layout: "E-m:e-p:32:32-i64:64-f128:64-n32-S64".into(),
+        data_layout: "E-m:e-p:32:32-i64:64-i128:128-f128:64-n32-S64".into(),
         arch: "sparc".into(),
         options: TargetOptions {
             cpu: "v9".into(),
diff --git a/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs b/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs
index 0157d03f854..987f69429c0 100644
--- a/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs
+++ b/compiler/rustc_target/src/spec/targets/sparc_unknown_none_elf.rs
@@ -17,7 +17,7 @@ pub(crate) fn target() -> Target {
         ..Default::default()
     };
     Target {
-        data_layout: "E-m:e-p:32:32-i64:64-f128:64-n32-S64".into(),
+        data_layout: "E-m:e-p:32:32-i64:64-i128:128-f128:64-n32-S64".into(),
         llvm_target: "sparc-unknown-none-elf".into(),
         metadata: crate::spec::TargetMetadata {
             description: Some("Bare 32-bit SPARC V7+".into()),
diff --git a/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs b/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs
index 138ce902d5e..fdc9628f78e 100644
--- a/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs
+++ b/compiler/rustc_target/src/spec/targets/sparcv9_sun_solaris.rs
@@ -19,7 +19,7 @@ pub(crate) fn target() -> Target {
             std: Some(true),
         },
         pointer_width: 64,
-        data_layout: "E-m:e-i64:64-n32:64-S128".into(),
+        data_layout: "E-m:e-i64:64-i128:128-n32:64-S128".into(),
         // Use "sparc64" instead of "sparcv9" here, since the former is already
         // used widely in the source base. If we ever needed ABI
         // differentiation from the sparc64, we could, but that would probably
--
cgit 1.4.1-3-g733a5