| author | bors <bors@rust-lang.org> | 2024-04-19 09:00:46 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2024-04-19 09:00:46 +0000 |
| commit | 95ae2dd7fff88ae753bf44e42417e8ab52d15f93 (patch) | |
| tree | ec975ef7b8a25d4e5cc32e8c217c27de92568e89 /compiler/rustc_codegen_llvm/src | |
| parent | 134ee309c9db64b682dc06ebe23291d85f91ea9e (diff) | |
| parent | fecd7fc937ad2d019f92d5b5c07d73ae206deaae (diff) | |
Auto merge of #3489 - rust-lang:rustup-2024-04-19, r=RalfJung
Automatic Rustup
Diffstat (limited to 'compiler/rustc_codegen_llvm/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/back/archive.rs | 1 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/common.rs | 49 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/consts.rs | 18 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/llvm/ffi.rs | 1 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/llvm_util.rs | 7 |
5 files changed, 55 insertions, 21 deletions
```diff
diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs
index 0619000364b..d4a3e39cef7 100644
--- a/compiler/rustc_codegen_llvm/src/back/archive.rs
+++ b/compiler/rustc_codegen_llvm/src/back/archive.rs
@@ -415,6 +415,7 @@ impl<'a> LlvmArchiveBuilder<'a> {
                 members.as_ptr() as *const &_,
                 true,
                 kind,
+                self.sess.target.arch == "arm64ec",
             );
             let ret = if r.into_result().is_err() {
                 let err = llvm::LLVMRustGetLastError();
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 568fcc3f3cf..ec33ce6292a 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -255,21 +255,38 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         let (prov, offset) = ptr.into_parts();
         let (base_addr, base_addr_space) = match self.tcx.global_alloc(prov.alloc_id()) {
             GlobalAlloc::Memory(alloc) => {
-                let init = const_alloc_to_llvm(self, alloc);
-                let alloc = alloc.inner();
-                let value = match alloc.mutability {
-                    Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
-                    _ => self.static_addr_of(init, alloc.align, None),
-                };
-                if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty() {
-                    let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
-                        let mut hasher = StableHasher::new();
-                        alloc.hash_stable(&mut hcx, &mut hasher);
-                        hasher.finish::<Hash128>()
-                    });
-                    llvm::set_value_name(value, format!("alloc_{hash:032x}").as_bytes());
+                // For ZSTs directly codegen an aligned pointer.
+                // This avoids generating a zero-sized constant value and actually needing a
+                // real address at runtime.
+                if alloc.inner().len() == 0 {
+                    assert_eq!(offset.bytes(), 0);
+                    let llval = self.const_usize(alloc.inner().align.bytes());
+                    return if matches!(layout.primitive(), Pointer(_)) {
+                        unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
+                    } else {
+                        self.const_bitcast(llval, llty)
+                    };
+                } else {
+                    let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
+                    let alloc = alloc.inner();
+                    let value = match alloc.mutability {
+                        Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
+                        _ => self.static_addr_of(init, alloc.align, None),
+                    };
+                    if !self.sess().fewer_names() && llvm::get_value_name(value).is_empty()
+                    {
+                        let hash = self.tcx.with_stable_hashing_context(|mut hcx| {
+                            let mut hasher = StableHasher::new();
+                            alloc.hash_stable(&mut hcx, &mut hasher);
+                            hasher.finish::<Hash128>()
+                        });
+                        llvm::set_value_name(
+                            value,
+                            format!("alloc_{hash:032x}").as_bytes(),
+                        );
+                    }
+                    (value, AddressSpace::DATA)
                 }
-                (value, AddressSpace::DATA)
             }
             GlobalAlloc::Function(fn_instance) => (
                 self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
@@ -280,7 +297,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     .tcx
                     .global_alloc(self.tcx.vtable_allocation((ty, trait_ref)))
                     .unwrap_memory();
-                let init = const_alloc_to_llvm(self, alloc);
+                let init = const_alloc_to_llvm(self, alloc, /*static*/ false);
                 let value = self.static_addr_of(init, alloc.inner().align, None);
                 (value, AddressSpace::DATA)
             }
@@ -308,7 +325,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value {
-        const_alloc_to_llvm(self, alloc)
+        const_alloc_to_llvm(self, alloc, /*static*/ false)
     }
 
     fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
```
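The new `GlobalAlloc::Memory` branch in common.rs leans on a language guarantee: a reference to a zero-sized type needs no backing storage, since any non-null, suitably aligned pointer is valid. Codegen can therefore emit the alignment itself as the pointer value instead of materializing an empty LLVM global. A minimal, standalone sketch of that guarantee (the `Zst` type here is hypothetical, not code from the patch):

```rust
// Zero-sized, but with 64-byte alignment: size 0 is a multiple of any
// alignment, so the type remains a ZST.
#[repr(align(64))]
struct Zst;

fn main() {
    assert_eq!(std::mem::size_of::<Zst>(), 0);
    let r: &Zst = &Zst;
    let addr = r as *const Zst as usize;
    // Guaranteed by the language: references are non-null and well-aligned,
    // even though no storage is ever allocated. Under this patch, a constant
    // reference like this is expected to lower to the alignment value itself.
    assert_ne!(addr, 0);
    assert_eq!(addr % std::mem::align_of::<Zst>(), 0);
}
```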
```diff
diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs
index 4afa230e598..a62dfe13204 100644
--- a/compiler/rustc_codegen_llvm/src/consts.rs
+++ b/compiler/rustc_codegen_llvm/src/consts.rs
@@ -26,8 +26,22 @@ use rustc_target::abi::{
 };
 use std::ops::Range;
 
-pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<'_>) -> &'ll Value {
+pub fn const_alloc_to_llvm<'ll>(
+    cx: &CodegenCx<'ll, '_>,
+    alloc: ConstAllocation<'_>,
+    is_static: bool,
+) -> &'ll Value {
     let alloc = alloc.inner();
+    // We expect that callers of const_alloc_to_llvm will instead directly codegen a pointer or
+    // integer for any &ZST where the ZST is a constant (i.e. not a static). We should never be
+    // producing empty LLVM allocations as they're just adding noise to binaries and forcing less
+    // optimal codegen.
+    //
+    // Statics have a guaranteed meaningful address so it's less clear that we want to do
+    // something like this; it's also harder.
+    if !is_static {
+        assert!(alloc.len() != 0);
+    }
     let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1);
     let dl = cx.data_layout();
     let pointer_size = dl.pointer_size.bytes() as usize;
@@ -120,7 +134,7 @@ fn codegen_static_initializer<'ll, 'tcx>(
     def_id: DefId,
 ) -> Result<(&'ll Value, ConstAllocation<'tcx>), ErrorHandled> {
     let alloc = cx.tcx.eval_static_initializer(def_id)?;
-    Ok((const_alloc_to_llvm(cx, alloc), alloc))
+    Ok((const_alloc_to_llvm(cx, alloc, /*static*/ true), alloc))
 }
 
 fn set_global_alignment<'ll>(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 5509baaa3e9..83158f6f1d5 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -2303,6 +2303,7 @@ extern "C" {
         Members: *const &RustArchiveMember<'_>,
         WriteSymbtab: bool,
         Kind: ArchiveKind,
+        isEC: bool,
     ) -> LLVMRustResult;
     pub fn LLVMRustArchiveMemberNew<'a>(
         Filename: *const c_char,
diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs
index c9e62e504ae..5552b381025 100644
--- a/compiler/rustc_codegen_llvm/src/llvm_util.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs
@@ -270,9 +270,10 @@ pub fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> LLVMFeature<'a> {
             "sve2-bitperm",
             TargetFeatureFoldStrength::EnableOnly("neon"),
         ),
-        // The unaligned-scalar-mem feature was renamed to fast-unaligned-access.
-        ("riscv32" | "riscv64", "fast-unaligned-access") if get_version().0 <= 17 => {
-            LLVMFeature::new("unaligned-scalar-mem")
+        // In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single
+        // feature called `fast-unaligned-access`. In LLVM 19, it was split back out.
+        ("riscv32" | "riscv64", "unaligned-scalar-mem") if get_version().0 == 18 => {
+            LLVMFeature::new("fast-unaligned-access")
         }
        // For LLVM 18, enable the evex512 target feature if a avx512 target feature is enabled.
        ("x86", s) if get_version().0 >= 18 && s.starts_with("avx512") => {
```
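Two of these hunks carry the reasoning of the patch. The `is_static` assertion in `const_alloc_to_llvm` rests on the one property constants lack: a static has a single stable address for the whole program, even when it is zero-sized, which is why `codegen_static_initializer` passes `/*static*/ true` and keeps a real (empty) allocation. A standalone sketch of that guarantee, not code from the patch:

```rust
// A zero-sized static still has one meaningful address.
static UNIT: () = ();

fn main() {
    let a: &() = &UNIT;
    let b: &() = &UNIT;
    // Guaranteed: every reference to the same static observes the same address.
    assert!(std::ptr::eq(a, b));
}
```

The `to_llvm_features` hunk maps the stable Rust-facing feature name to whatever LLVM calls it in a given major version. A hypothetical distillation of that match arm (the helper name is illustrative only):

```rust
// Rust-facing name: `unaligned-scalar-mem`. LLVM 18 merged it with
// `unaligned-vector-mem` into `fast-unaligned-access`; LLVM 19 split it back out.
fn riscv_unaligned_scalar_mem(llvm_major: u32) -> &'static str {
    match llvm_major {
        18 => "fast-unaligned-access",
        _ => "unaligned-scalar-mem",
    }
}

fn main() {
    assert_eq!(riscv_unaligned_scalar_mem(18), "fast-unaligned-access");
    assert_eq!(riscv_unaligned_scalar_mem(19), "unaligned-scalar-mem");
}
```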
