Diffstat (limited to 'compiler/rustc_abi/src')
 compiler/rustc_abi/src/callconv.rs          |   5
 compiler/rustc_abi/src/callconv/reg.rs      |   2
 compiler/rustc_abi/src/extern_abi.rs        | 375
 compiler/rustc_abi/src/extern_abi/tests.rs  |  22
 compiler/rustc_abi/src/layout.rs            |  69
 compiler/rustc_abi/src/lib.rs               | 161
6 files changed, 263 insertions(+), 371 deletions(-)
diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs index daa365bf6e1..4529ab8058e 100644 --- a/compiler/rustc_abi/src/callconv.rs +++ b/compiler/rustc_abi/src/callconv.rs @@ -1,6 +1,5 @@ #[cfg(feature = "nightly")] -use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout}; -use crate::{Primitive, Size, Variants}; +use crate::{BackendRepr, FieldsShape, Primitive, Size, TyAbiInterface, TyAndLayout, Variants}; mod reg; @@ -66,8 +65,6 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ty: TyAbiInterface<'a, C> + Copy, { match self.backend_repr { - BackendRepr::Uninhabited => Err(Heterogeneous), - // The primitive for this algorithm. BackendRepr::Scalar(scalar) => { let kind = match scalar.primitive() { diff --git a/compiler/rustc_abi/src/callconv/reg.rs b/compiler/rustc_abi/src/callconv/reg.rs index 66f47c52c15..8cf140dbaad 100644 --- a/compiler/rustc_abi/src/callconv/reg.rs +++ b/compiler/rustc_abi/src/callconv/reg.rs @@ -57,7 +57,7 @@ impl Reg { 128 => dl.f128_align.abi, _ => panic!("unsupported float: {self:?}"), }, - RegKind::Vector => dl.vector_align(self.size).abi, + RegKind::Vector => dl.llvmlike_vector_align(self.size).abi, } } } diff --git a/compiler/rustc_abi/src/extern_abi.rs b/compiler/rustc_abi/src/extern_abi.rs index 130834d560f..543c2f8ab12 100644 --- a/compiler/rustc_abi/src/extern_abi.rs +++ b/compiler/rustc_abi/src/extern_abi.rs @@ -1,15 +1,19 @@ +use std::cmp::Ordering; use std::fmt; +use std::hash::{Hash, Hasher}; -use rustc_macros::{Decodable, Encodable, HashStable_Generic}; -use rustc_span::{Span, Symbol, sym}; +#[cfg(feature = "nightly")] +use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd}; +#[cfg(feature = "nightly")] +use rustc_macros::{Decodable, Encodable}; #[cfg(test)] mod tests; use ExternAbi as Abi; -#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Clone, Copy, Debug)] -#[derive(HashStable_Generic, Encodable, Decodable)] +#[derive(Clone, Copy, Debug)] +#[cfg_attr(feature = "nightly", derive(Encodable, Decodable))] pub enum ExternAbi { // Some of the ABIs come first because every time we add a new ABI, we have to re-bless all the // hashing tests. These are used in many places, so giving them stable values reduces test @@ -69,279 +73,160 @@ pub enum ExternAbi { RiscvInterruptS, } -impl Abi { - pub fn supports_varargs(self) -> bool { - // * C and Cdecl obviously support varargs. - // * C can be based on Aapcs, SysV64 or Win64, so they must support varargs. - // * EfiApi is based on Win64 or C, so it also supports it. - // * System falls back to C for functions with varargs. - // - // * Stdcall does not, because it would be impossible for the callee to clean - // up the arguments. (callee doesn't know how many arguments are there) - // * Same for Fastcall, Vectorcall and Thiscall. - // * Other calling conventions are related to hardware or the compiler itself. - match self { - Self::C { .. } - | Self::Cdecl { .. } - | Self::System { .. } - | Self::Aapcs { .. } - | Self::Win64 { .. } - | Self::SysV64 { .. } - | Self::EfiApi => true, - _ => false, +macro_rules! abi_impls { + ($e_name:ident = { + $($variant:ident $({ unwind: $uw:literal })? 
=><= $tok:literal,)* + }) => { + impl $e_name { + pub const ALL_VARIANTS: &[Self] = &[ + $($e_name::$variant $({ unwind: $uw })*,)* + ]; + pub const fn as_str(&self) -> &'static str { + match self { + $($e_name::$variant $( { unwind: $uw } )* => $tok,)* + } + } + } + + impl ::core::str::FromStr for $e_name { + type Err = AbiFromStrErr; + fn from_str(s: &str) -> Result<$e_name, Self::Err> { + match s { + $($tok => Ok($e_name::$variant $({ unwind: $uw })*),)* + _ => Err(AbiFromStrErr::Unknown), + } + } } } } -#[derive(Copy, Clone)] -pub struct AbiData { - abi: Abi, +#[derive(Debug)] +pub enum AbiFromStrErr { + Unknown, +} - /// Name of this ABI as we like it called. - name: &'static str, +abi_impls! { + ExternAbi = { + C { unwind: false } =><= "C", + CCmseNonSecureCall =><= "C-cmse-nonsecure-call", + CCmseNonSecureEntry =><= "C-cmse-nonsecure-entry", + C { unwind: true } =><= "C-unwind", + Rust =><= "Rust", + Aapcs { unwind: false } =><= "aapcs", + Aapcs { unwind: true } =><= "aapcs-unwind", + AvrInterrupt =><= "avr-interrupt", + AvrNonBlockingInterrupt =><= "avr-non-blocking-interrupt", + Cdecl { unwind: false } =><= "cdecl", + Cdecl { unwind: true } =><= "cdecl-unwind", + EfiApi =><= "efiapi", + Fastcall { unwind: false } =><= "fastcall", + Fastcall { unwind: true } =><= "fastcall-unwind", + GpuKernel =><= "gpu-kernel", + Msp430Interrupt =><= "msp430-interrupt", + PtxKernel =><= "ptx-kernel", + RiscvInterruptM =><= "riscv-interrupt-m", + RiscvInterruptS =><= "riscv-interrupt-s", + RustCall =><= "rust-call", + RustCold =><= "rust-cold", + RustIntrinsic =><= "rust-intrinsic", + Stdcall { unwind: false } =><= "stdcall", + Stdcall { unwind: true } =><= "stdcall-unwind", + System { unwind: false } =><= "system", + System { unwind: true } =><= "system-unwind", + SysV64 { unwind: false } =><= "sysv64", + SysV64 { unwind: true } =><= "sysv64-unwind", + Thiscall { unwind: false } =><= "thiscall", + Thiscall { unwind: true } =><= "thiscall-unwind", + Unadjusted =><= "unadjusted", + Vectorcall { unwind: false } =><= "vectorcall", + Vectorcall { unwind: true } =><= "vectorcall-unwind", + Win64 { unwind: false } =><= "win64", + Win64 { unwind: true } =><= "win64-unwind", + X86Interrupt =><= "x86-interrupt", + } } -#[allow(non_upper_case_globals)] -const AbiDatas: &[AbiData] = &[ - AbiData { abi: Abi::Rust, name: "Rust" }, - AbiData { abi: Abi::C { unwind: false }, name: "C" }, - AbiData { abi: Abi::C { unwind: true }, name: "C-unwind" }, - AbiData { abi: Abi::Cdecl { unwind: false }, name: "cdecl" }, - AbiData { abi: Abi::Cdecl { unwind: true }, name: "cdecl-unwind" }, - AbiData { abi: Abi::Stdcall { unwind: false }, name: "stdcall" }, - AbiData { abi: Abi::Stdcall { unwind: true }, name: "stdcall-unwind" }, - AbiData { abi: Abi::Fastcall { unwind: false }, name: "fastcall" }, - AbiData { abi: Abi::Fastcall { unwind: true }, name: "fastcall-unwind" }, - AbiData { abi: Abi::Vectorcall { unwind: false }, name: "vectorcall" }, - AbiData { abi: Abi::Vectorcall { unwind: true }, name: "vectorcall-unwind" }, - AbiData { abi: Abi::Thiscall { unwind: false }, name: "thiscall" }, - AbiData { abi: Abi::Thiscall { unwind: true }, name: "thiscall-unwind" }, - AbiData { abi: Abi::Aapcs { unwind: false }, name: "aapcs" }, - AbiData { abi: Abi::Aapcs { unwind: true }, name: "aapcs-unwind" }, - AbiData { abi: Abi::Win64 { unwind: false }, name: "win64" }, - AbiData { abi: Abi::Win64 { unwind: true }, name: "win64-unwind" }, - AbiData { abi: Abi::SysV64 { unwind: false }, name: "sysv64" }, - AbiData { abi: Abi::SysV64 
{ unwind: true }, name: "sysv64-unwind" }, - AbiData { abi: Abi::PtxKernel, name: "ptx-kernel" }, - AbiData { abi: Abi::Msp430Interrupt, name: "msp430-interrupt" }, - AbiData { abi: Abi::X86Interrupt, name: "x86-interrupt" }, - AbiData { abi: Abi::GpuKernel, name: "gpu-kernel" }, - AbiData { abi: Abi::EfiApi, name: "efiapi" }, - AbiData { abi: Abi::AvrInterrupt, name: "avr-interrupt" }, - AbiData { abi: Abi::AvrNonBlockingInterrupt, name: "avr-non-blocking-interrupt" }, - AbiData { abi: Abi::CCmseNonSecureCall, name: "C-cmse-nonsecure-call" }, - AbiData { abi: Abi::CCmseNonSecureEntry, name: "C-cmse-nonsecure-entry" }, - AbiData { abi: Abi::System { unwind: false }, name: "system" }, - AbiData { abi: Abi::System { unwind: true }, name: "system-unwind" }, - AbiData { abi: Abi::RustIntrinsic, name: "rust-intrinsic" }, - AbiData { abi: Abi::RustCall, name: "rust-call" }, - AbiData { abi: Abi::Unadjusted, name: "unadjusted" }, - AbiData { abi: Abi::RustCold, name: "rust-cold" }, - AbiData { abi: Abi::RiscvInterruptM, name: "riscv-interrupt-m" }, - AbiData { abi: Abi::RiscvInterruptS, name: "riscv-interrupt-s" }, -]; +impl Ord for ExternAbi { + fn cmp(&self, rhs: &Self) -> Ordering { + self.as_str().cmp(rhs.as_str()) + } +} -#[derive(Copy, Clone, Debug)] -pub enum AbiUnsupported { - Unrecognized, - Reason { explain: &'static str }, +impl PartialOrd for ExternAbi { + fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> { + Some(self.cmp(rhs)) + } } -/// Returns the ABI with the given name (if any). -pub fn lookup(name: &str) -> Result<Abi, AbiUnsupported> { - AbiDatas.iter().find(|abi_data| name == abi_data.name).map(|&x| x.abi).ok_or_else(|| match name { - "riscv-interrupt" => AbiUnsupported::Reason { - explain: "please use one of riscv-interrupt-m or riscv-interrupt-s for machine- or supervisor-level interrupts, respectively", - }, - "riscv-interrupt-u" => AbiUnsupported::Reason { - explain: "user-mode interrupt handlers have been removed from LLVM pending standardization, see: https://reviews.llvm.org/D149314", - }, - "wasm" => AbiUnsupported::Reason { - explain: "non-standard wasm ABI is no longer supported", - }, +impl PartialEq for ExternAbi { + fn eq(&self, rhs: &Self) -> bool { + self.cmp(rhs) == Ordering::Equal + } +} - _ => AbiUnsupported::Unrecognized, +impl Eq for ExternAbi {} - }) +impl Hash for ExternAbi { + fn hash<H: Hasher>(&self, state: &mut H) { + self.as_str().hash(state); + // double-assurance of a prefix breaker + u32::from_be_bytes(*b"ABI\0").hash(state); + } } -pub fn all_names() -> Vec<&'static str> { - AbiDatas.iter().map(|d| d.name).collect() +#[cfg(feature = "nightly")] +impl<C> HashStable<C> for ExternAbi { + #[inline] + fn hash_stable(&self, _: &mut C, hasher: &mut StableHasher) { + Hash::hash(self, hasher); + } } -pub fn enabled_names(features: &rustc_feature::Features, span: Span) -> Vec<&'static str> { - AbiDatas - .iter() - .map(|d| d.name) - .filter(|name| is_enabled(features, span, name).is_ok()) - .collect() -} +#[cfg(feature = "nightly")] +impl StableOrd for ExternAbi { + const CAN_USE_UNSTABLE_SORT: bool = true; -pub enum AbiDisabled { - Unstable { feature: Symbol, explain: &'static str }, - Unrecognized, + // because each ABI is hashed like a string, there is no possible instability + const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = (); } -pub fn is_enabled( - features: &rustc_feature::Features, - span: Span, - name: &str, -) -> Result<(), AbiDisabled> { - let s = is_stable(name); - if let Err(AbiDisabled::Unstable { feature, .. 
}) = s { - if features.enabled(feature) || span.allows_unstable(feature) { - return Ok(()); +impl ExternAbi { + pub fn supports_varargs(self) -> bool { + // * C and Cdecl obviously support varargs. + // * C can be based on Aapcs, SysV64 or Win64, so they must support varargs. + // * EfiApi is based on Win64 or C, so it also supports it. + // + // * Stdcall does not, because it would be impossible for the callee to clean + // up the arguments. (callee doesn't know how many arguments are there) + // * Same for Fastcall, Vectorcall and Thiscall. + // * Other calling conventions are related to hardware or the compiler itself. + match self { + Self::C { .. } + | Self::Cdecl { .. } + | Self::Aapcs { .. } + | Self::Win64 { .. } + | Self::SysV64 { .. } + | Self::EfiApi => true, + _ => false, } } - s } -/// Returns whether the ABI is stable to use. -/// -/// Note that there is a separate check determining whether the ABI is even supported -/// on the current target; see `fn is_abi_supported` in `rustc_target::spec`. -pub fn is_stable(name: &str) -> Result<(), AbiDisabled> { - match name { - // Stable - "Rust" | "C" | "C-unwind" | "cdecl" | "cdecl-unwind" | "stdcall" | "stdcall-unwind" - | "fastcall" | "fastcall-unwind" | "aapcs" | "aapcs-unwind" | "win64" | "win64-unwind" - | "sysv64" | "sysv64-unwind" | "system" | "system-unwind" | "efiapi" | "thiscall" - | "thiscall-unwind" => Ok(()), - "rust-intrinsic" => Err(AbiDisabled::Unstable { - feature: sym::intrinsics, - explain: "intrinsics are subject to change", - }), - "vectorcall" => Err(AbiDisabled::Unstable { - feature: sym::abi_vectorcall, - explain: "vectorcall is experimental and subject to change", - }), - "vectorcall-unwind" => Err(AbiDisabled::Unstable { - feature: sym::abi_vectorcall, - explain: "vectorcall-unwind ABI is experimental and subject to change", - }), - "rust-call" => Err(AbiDisabled::Unstable { - feature: sym::unboxed_closures, - explain: "rust-call ABI is subject to change", - }), - "rust-cold" => Err(AbiDisabled::Unstable { - feature: sym::rust_cold_cc, - explain: "rust-cold is experimental and subject to change", - }), - "ptx-kernel" => Err(AbiDisabled::Unstable { - feature: sym::abi_ptx, - explain: "PTX ABIs are experimental and subject to change", - }), - "unadjusted" => Err(AbiDisabled::Unstable { - feature: sym::abi_unadjusted, - explain: "unadjusted ABI is an implementation detail and perma-unstable", - }), - "msp430-interrupt" => Err(AbiDisabled::Unstable { - feature: sym::abi_msp430_interrupt, - explain: "msp430-interrupt ABI is experimental and subject to change", - }), - "x86-interrupt" => Err(AbiDisabled::Unstable { - feature: sym::abi_x86_interrupt, - explain: "x86-interrupt ABI is experimental and subject to change", - }), - "gpu-kernel" => Err(AbiDisabled::Unstable { - feature: sym::abi_gpu_kernel, - explain: "gpu-kernel ABI is experimental and subject to change", - }), - "avr-interrupt" | "avr-non-blocking-interrupt" => Err(AbiDisabled::Unstable { - feature: sym::abi_avr_interrupt, - explain: "avr-interrupt and avr-non-blocking-interrupt ABIs are experimental and subject to change", - }), - "riscv-interrupt-m" | "riscv-interrupt-s" => Err(AbiDisabled::Unstable { - feature: sym::abi_riscv_interrupt, - explain: "riscv-interrupt ABIs are experimental and subject to change", - }), - "C-cmse-nonsecure-call" => Err(AbiDisabled::Unstable { - feature: sym::abi_c_cmse_nonsecure_call, - explain: "C-cmse-nonsecure-call ABI is experimental and subject to change", - }), - "C-cmse-nonsecure-entry" => 
Err(AbiDisabled::Unstable { - feature: sym::cmse_nonsecure_entry, - explain: "C-cmse-nonsecure-entry ABI is experimental and subject to change", - }), - _ => Err(AbiDisabled::Unrecognized), - } +pub fn all_names() -> Vec<&'static str> { + ExternAbi::ALL_VARIANTS.iter().map(|abi| abi.as_str()).collect() } -impl Abi { +impl ExternAbi { /// Default ABI chosen for `extern fn` declarations without an explicit ABI. pub const FALLBACK: Abi = Abi::C { unwind: false }; - #[inline] - pub fn index(self) -> usize { - // N.B., this ordering MUST match the AbiDatas array above. - // (This is ensured by the test indices_are_correct().) - use Abi::*; - let i = match self { - // Cross-platform ABIs - Rust => 0, - C { unwind: false } => 1, - C { unwind: true } => 2, - // Platform-specific ABIs - Cdecl { unwind: false } => 3, - Cdecl { unwind: true } => 4, - Stdcall { unwind: false } => 5, - Stdcall { unwind: true } => 6, - Fastcall { unwind: false } => 7, - Fastcall { unwind: true } => 8, - Vectorcall { unwind: false } => 9, - Vectorcall { unwind: true } => 10, - Thiscall { unwind: false } => 11, - Thiscall { unwind: true } => 12, - Aapcs { unwind: false } => 13, - Aapcs { unwind: true } => 14, - Win64 { unwind: false } => 15, - Win64 { unwind: true } => 16, - SysV64 { unwind: false } => 17, - SysV64 { unwind: true } => 18, - PtxKernel => 19, - Msp430Interrupt => 20, - X86Interrupt => 21, - GpuKernel => 22, - EfiApi => 23, - AvrInterrupt => 24, - AvrNonBlockingInterrupt => 25, - CCmseNonSecureCall => 26, - CCmseNonSecureEntry => 27, - // Cross-platform ABIs - System { unwind: false } => 28, - System { unwind: true } => 29, - RustIntrinsic => 30, - RustCall => 31, - Unadjusted => 32, - RustCold => 33, - RiscvInterruptM => 34, - RiscvInterruptS => 35, - }; - debug_assert!( - AbiDatas - .iter() - .enumerate() - .find(|(_, AbiData { abi, .. 
})| *abi == self) - .map(|(index, _)| index) - .expect("abi variant has associated data") - == i, - "Abi index did not match `AbiDatas` ordering" - ); - i - } - - #[inline] - pub fn data(self) -> &'static AbiData { - &AbiDatas[self.index()] - } - pub fn name(self) -> &'static str { - self.data().name + self.as_str() } } -impl fmt::Display for Abi { +impl fmt::Display for ExternAbi { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "\"{}\"", self.name()) + write!(f, "\"{}\"", self.as_str()) } } diff --git a/compiler/rustc_abi/src/extern_abi/tests.rs b/compiler/rustc_abi/src/extern_abi/tests.rs index 4823058dd69..fc546a6570f 100644 --- a/compiler/rustc_abi/src/extern_abi/tests.rs +++ b/compiler/rustc_abi/src/extern_abi/tests.rs @@ -1,29 +1,31 @@ use std::assert_matches::assert_matches; +use std::str::FromStr; use super::*; #[allow(non_snake_case)] #[test] fn lookup_Rust() { - let abi = lookup("Rust"); - assert!(abi.is_ok() && abi.unwrap().data().name == "Rust"); + let abi = ExternAbi::from_str("Rust"); + assert!(abi.is_ok() && abi.unwrap().as_str() == "Rust"); } #[test] fn lookup_cdecl() { - let abi = lookup("cdecl"); - assert!(abi.is_ok() && abi.unwrap().data().name == "cdecl"); + let abi = ExternAbi::from_str("cdecl"); + assert!(abi.is_ok() && abi.unwrap().as_str() == "cdecl"); } #[test] fn lookup_baz() { - let abi = lookup("baz"); - assert_matches!(abi, Err(AbiUnsupported::Unrecognized)); + let abi = ExternAbi::from_str("baz"); + assert_matches!(abi, Err(AbiFromStrErr::Unknown)); } #[test] -fn indices_are_correct() { - for (i, abi_data) in AbiDatas.iter().enumerate() { - assert_eq!(i, abi_data.abi.index()); - } +fn guarantee_lexicographic_ordering() { + let abis = ExternAbi::ALL_VARIANTS; + let mut sorted_abis = abis.to_vec(); + sorted_abis.sort_unstable(); + assert_eq!(abis, sorted_abis); } diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index b8773f9ff38..3f83787ea37 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -2,6 +2,7 @@ use std::fmt::{self, Write}; use std::ops::{Bound, Deref}; use std::{cmp, iter}; +use rustc_hashes::Hash64; use rustc_index::Idx; use tracing::debug; @@ -129,11 +130,12 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { }, backend_repr: BackendRepr::ScalarPair(a, b), largest_niche, + uninhabited: false, align, size, max_repr_align: None, unadjusted_abi_align: align.abi, - randomization_seed: combined_seed, + randomization_seed: Hash64::new(combined_seed), } } @@ -220,13 +222,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { LayoutData { variants: Variants::Empty, fields: FieldsShape::Primitive, - backend_repr: BackendRepr::Uninhabited, + backend_repr: BackendRepr::Memory { sized: true }, largest_niche: None, + uninhabited: true, align: dl.i8_align, size: Size::ZERO, max_repr_align: None, unadjusted_abi_align: dl.i8_align.abi, - randomization_seed: 0, + randomization_seed: Hash64::ZERO, } } @@ -307,10 +310,10 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align }; let mut max_repr_align = repr.align; - // If all the non-ZST fields have the same ABI and union ABI optimizations aren't - // disabled, we can use that common ABI for the union as a whole. + // If all the non-ZST fields have the same repr and union repr optimizations aren't + // disabled, we can use that common repr for the union as a whole. 
struct AbiMismatch; - let mut common_non_zst_abi_and_align = if repr.inhibits_union_abi_opt() { + let mut common_non_zst_repr_and_align = if repr.inhibits_union_abi_opt() { // Can't optimize Err(AbiMismatch) } else { @@ -334,14 +337,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { continue; } - if let Ok(common) = common_non_zst_abi_and_align { + if let Ok(common) = common_non_zst_repr_and_align { // Discard valid range information and allow undef let field_abi = field.backend_repr.to_union(); if let Some((common_abi, common_align)) = common { if common_abi != field_abi { // Different fields have different ABI: disable opt - common_non_zst_abi_and_align = Err(AbiMismatch); + common_non_zst_repr_and_align = Err(AbiMismatch); } else { // Fields with the same non-Aggregate ABI should also // have the same alignment @@ -354,7 +357,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { } } else { // First non-ZST field: record its ABI and alignment - common_non_zst_abi_and_align = Ok(Some((field_abi, field.align.abi))); + common_non_zst_repr_and_align = Ok(Some((field_abi, field.align.abi))); } } } @@ -373,16 +376,25 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { // If all non-ZST fields have the same ABI, we may forward that ABI // for the union as a whole, unless otherwise inhibited. - let abi = match common_non_zst_abi_and_align { + let backend_repr = match common_non_zst_repr_and_align { Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true }, - Ok(Some((abi, _))) => { - if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) { - // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt + Ok(Some((repr, _))) => match repr { + // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt + BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _) + if repr.scalar_align(dl).unwrap() != align.abi => + { BackendRepr::Memory { sized: true } - } else { - abi } - } + // Vectors require at least element alignment, else disable the opt + BackendRepr::Vector { element, count: _ } if element.align(dl).abi > align.abi => { + BackendRepr::Memory { sized: true } + } + // the alignment tests passed and we can use this + BackendRepr::Scalar(..) + | BackendRepr::ScalarPair(..) + | BackendRepr::Vector { .. } + | BackendRepr::Memory { .. } => repr, + }, }; let Some(union_field_count) = NonZeroUsize::new(only_variant.len()) else { @@ -397,8 +409,9 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { Ok(LayoutData { variants: Variants::Single { index: only_variant_idx }, fields: FieldsShape::Union(union_field_count), - backend_repr: abi, + backend_repr, largest_niche: None, + uninhabited: false, align, size: size.align_to(align.abi), max_repr_align, @@ -446,7 +459,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { Scalar::Union { .. 
} => {} }; match &mut st.backend_repr { - BackendRepr::Uninhabited => {} BackendRepr::Scalar(scalar) => hide_niches(scalar), BackendRepr::ScalarPair(a, b) => { hide_niches(a); @@ -638,9 +650,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let same_size = size == variant_layouts[largest_variant_index].size; let same_align = align == variant_layouts[largest_variant_index].align; - let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) { - BackendRepr::Uninhabited - } else if same_size && same_align && others_zst { + let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited()); + let abi = if same_size && same_align && others_zst { match variant_layouts[largest_variant_index].backend_repr { // When the total alignment and size match, we can use the // same ABI as the scalar variant with the reserved niche. @@ -682,6 +693,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { }, backend_repr: abi, largest_niche, + uninhabited, size, align, max_repr_align, @@ -852,9 +864,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { }; let mut abi = BackendRepr::Memory { sized: true }; - if layout_variants.iter().all(|v| v.is_uninhabited()) { - abi = BackendRepr::Uninhabited; - } else if tag.size(dl) == size { + let uninhabited = layout_variants.iter().all(|v| v.is_uninhabited()); + if tag.size(dl) == size { // Make sure we only use scalar layout when the enum is entirely its // own tag (i.e. it has no padding nor any non-ZST variant fields). abi = BackendRepr::Scalar(tag); @@ -994,6 +1005,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { memory_index: [0].into(), }, largest_niche, + uninhabited, backend_repr: abi, align, size, @@ -1058,7 +1070,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts. let field_seed = fields_excluding_tail .iter() - .fold(0u64, |acc, f| acc.wrapping_add(f.randomization_seed)); + .fold(Hash64::ZERO, |acc, f| acc.wrapping_add(f.randomization_seed)); if optimize_field_order && fields.len() > 1 { // If `-Z randomize-layout` was enabled for the type definition we can shuffle @@ -1072,7 +1084,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field // ordering. let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64( - field_seed.wrapping_add(repr.field_shuffle_seed), + field_seed.wrapping_add(repr.field_shuffle_seed).as_u64(), ); // Shuffle the ordering of the fields. 
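
The layout.rs hunk just above switches the `-Z randomize-layout` seed to `Hash64`: per-field `randomization_seed` values are folded together with `wrapping_add` and combined with the type's `field_shuffle_seed` before seeding `Xoshiro128StarStar`. A minimal standalone sketch of that seeding scheme, using plain `u64` seeds instead of `Hash64` and assuming compatible versions of the `rand` and `rand_xoshiro` crates (the helper name here is invented for illustration, not the compiler's code):

    use rand::seq::SliceRandom;
    use rand::SeedableRng;
    use rand_xoshiro::Xoshiro128StarStar;

    fn shuffle_field_order(field_seeds: &[u64], type_seed: u64, order: &mut [usize]) {
        // Fold the per-field seeds with wrapping_add (commutative, never panics),
        // then mix in the type's own shuffle seed to form the RNG seed.
        let field_seed = field_seeds.iter().copied().fold(0u64, u64::wrapping_add);
        let mut rng = Xoshiro128StarStar::seed_from_u64(field_seed.wrapping_add(type_seed));
        // Deterministic: the same seeds always yield the same permutation.
        order.shuffle(&mut rng);
    }

    fn main() {
        let mut order: Vec<usize> = (0..4).collect();
        shuffle_field_order(&[3, 5, 7, 11], 42, &mut order);
        println!("{order:?}");
    }
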
@@ -1354,9 +1366,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { _ => {} } } - if fields.iter().any(|f| f.is_uninhabited()) { - abi = BackendRepr::Uninhabited; - } + let uninhabited = fields.iter().any(|f| f.is_uninhabited()); let unadjusted_abi_align = if repr.transparent() { match layout_of_single_non_zst_field { @@ -1377,6 +1387,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { fields: FieldsShape::Arbitrary { offsets, memory_index }, backend_repr: abi, largest_niche, + uninhabited, align, size, max_repr_align, diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index fc34b288933..81e4e255f37 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -50,25 +50,20 @@ use std::str::FromStr; use bitflags::bitflags; #[cfg(feature = "nightly")] use rustc_data_structures::stable_hasher::StableOrd; +use rustc_hashes::Hash64; use rustc_index::{Idx, IndexSlice, IndexVec}; #[cfg(feature = "nightly")] -use rustc_macros::HashStable_Generic; -#[cfg(feature = "nightly")] -use rustc_macros::{Decodable_Generic, Encodable_Generic}; +use rustc_macros::{Decodable_Generic, Encodable_Generic, HashStable_Generic}; mod callconv; mod layout; #[cfg(test)] mod tests; -#[cfg(feature = "nightly")] mod extern_abi; pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind}; -#[cfg(feature = "nightly")] -pub use extern_abi::{ - AbiDisabled, AbiUnsupported, ExternAbi, all_names, enabled_names, is_enabled, is_stable, lookup, -}; +pub use extern_abi::{ExternAbi, all_names}; #[cfg(feature = "nightly")] pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx}; pub use layout::{LayoutCalculator, LayoutCalculatorError}; @@ -146,7 +141,7 @@ pub struct ReprOptions { /// hash without loss, but it does pay the price of being larger. /// Everything's a tradeoff, a 64-bit seed should be sufficient for our /// purposes (primarily `-Z randomize-layout`) - pub field_shuffle_seed: u64, + pub field_shuffle_seed: Hash64, } impl ReprOptions { @@ -334,19 +329,19 @@ impl TargetDataLayout { [p] if p.starts_with('P') => { dl.instruction_address_space = parse_address_space(&p[1..], "P")? } - ["a", ref a @ ..] => dl.aggregate_align = parse_align(a, "a")?, - ["f16", ref a @ ..] => dl.f16_align = parse_align(a, "f16")?, - ["f32", ref a @ ..] => dl.f32_align = parse_align(a, "f32")?, - ["f64", ref a @ ..] => dl.f64_align = parse_align(a, "f64")?, - ["f128", ref a @ ..] => dl.f128_align = parse_align(a, "f128")?, + ["a", a @ ..] => dl.aggregate_align = parse_align(a, "a")?, + ["f16", a @ ..] => dl.f16_align = parse_align(a, "f16")?, + ["f32", a @ ..] => dl.f32_align = parse_align(a, "f32")?, + ["f64", a @ ..] => dl.f64_align = parse_align(a, "f64")?, + ["f128", a @ ..] => dl.f128_align = parse_align(a, "f128")?, // FIXME(erikdesjardins): we should be parsing nonzero address spaces // this will require replacing TargetDataLayout::{pointer_size,pointer_align} // with e.g. `fn pointer_size_in(AddressSpace)` - [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => { + [p @ "p", s, a @ ..] | [p @ "p0", s, a @ ..] => { dl.pointer_size = parse_size(s, p)?; dl.pointer_align = parse_align(a, p)?; } - [s, ref a @ ..] if s.starts_with('i') => { + [s, a @ ..] if s.starts_with('i') => { let Ok(bits) = s[1..].parse::<u64>() else { parse_size(&s[1..], "i")?; // For the user error. continue; @@ -367,7 +362,7 @@ impl TargetDataLayout { dl.i128_align = a; } } - [s, ref a @ ..] if s.starts_with('v') => { + [s, a @ ..] 
if s.starts_with('v') => { let v_size = parse_size(&s[1..], "v")?; let a = parse_align(a, s)?; if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) { @@ -413,16 +408,21 @@ impl TargetDataLayout { } } + /// psABI-mandated alignment for a vector type, if any #[inline] - pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign { - for &(size, align) in &self.vector_align { - if size == vec_size { - return align; - } - } - // Default to natural alignment, which is what LLVM does. - // That is, use the size, rounded up to a power of 2. - AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap()) + fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAndPrefAlign> { + self.vector_align + .iter() + .find(|(size, _align)| *size == vec_size) + .map(|(_size, align)| *align) + } + + /// an alignment resembling the one LLVM would pick for a vector + #[inline] + pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAndPrefAlign { + self.cabi_vector_align(vec_size).unwrap_or(AbiAndPrefAlign::new( + Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(), + )) } } @@ -815,20 +815,19 @@ impl Align { self.bits().try_into().unwrap() } - /// Computes the best alignment possible for the given offset - /// (the largest power of two that the offset is a multiple of). + /// Obtain the greatest factor of `size` that is an alignment + /// (the largest power of two the Size is a multiple of). /// - /// N.B., for an offset of `0`, this happens to return `2^64`. + /// Note that all numbers are factors of 0 #[inline] - pub fn max_for_offset(offset: Size) -> Align { - Align { pow2: offset.bytes().trailing_zeros() as u8 } + pub fn max_aligned_factor(size: Size) -> Align { + Align { pow2: size.bytes().trailing_zeros() as u8 } } - /// Lower the alignment, if necessary, such that the given offset - /// is aligned to it (the offset is a multiple of the alignment). + /// Reduces Align to an aligned factor of `size`. #[inline] - pub fn restrict_for_offset(self, offset: Size) -> Align { - self.min(Align::max_for_offset(offset)) + pub fn restrict_for_offset(self, size: Size) -> Align { + self.min(Align::max_aligned_factor(size)) } } @@ -1350,7 +1349,7 @@ impl<FieldIdx: Idx> FieldsShape<FieldIdx> { /// Gets source indices of the fields by increasing offsets. #[inline] - pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> + '_ { + pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> { let mut inverse_small = [0u8; 64]; let mut inverse_big = IndexVec::new(); let use_small = self.count() <= inverse_small.len(); @@ -1409,7 +1408,6 @@ impl AddressSpace { #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] #[cfg_attr(feature = "nightly", derive(HashStable_Generic))] pub enum BackendRepr { - Uninhabited, Scalar(Scalar), ScalarPair(Scalar, Scalar), Vector { @@ -1428,10 +1426,9 @@ impl BackendRepr { #[inline] pub fn is_unsized(&self) -> bool { match *self { - BackendRepr::Uninhabited - | BackendRepr::Scalar(_) - | BackendRepr::ScalarPair(..) - | BackendRepr::Vector { .. } => false, + BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. 
} => { + false + } BackendRepr::Memory { sized } => !sized, } } @@ -1450,12 +1447,6 @@ impl BackendRepr { } } - /// Returns `true` if this is an uninhabited type - #[inline] - pub fn is_uninhabited(&self) -> bool { - matches!(*self, BackendRepr::Uninhabited) - } - /// Returns `true` if this is a scalar type #[inline] pub fn is_scalar(&self) -> bool { @@ -1468,37 +1459,38 @@ impl BackendRepr { matches!(*self, BackendRepr::Scalar(s) if s.is_bool()) } - /// Returns the fixed alignment of this ABI, if any is mandated. - pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> { - Some(match *self { - BackendRepr::Scalar(s) => s.align(cx), - BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)), - BackendRepr::Vector { element, count } => { - cx.data_layout().vector_align(element.size(cx) * count) - } - BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None, - }) + /// The psABI alignment for a `Scalar` or `ScalarPair` + /// + /// `None` for other variants. + pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> { + match *self { + BackendRepr::Scalar(s) => Some(s.align(cx).abi), + BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi), + // The align of a Vector can vary in surprising ways + BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None, + } } - /// Returns the fixed size of this ABI, if any is mandated. - pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> { - Some(match *self { - BackendRepr::Scalar(s) => { - // No padding in scalars. - s.size(cx) - } + /// The psABI size for a `Scalar` or `ScalarPair` + /// + /// `None` for other variants + pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> { + match *self { + // No padding in scalars. + BackendRepr::Scalar(s) => Some(s.size(cx)), + // May have some padding between the pair. BackendRepr::ScalarPair(s1, s2) => { - // May have some padding between the pair. let field2_offset = s1.size(cx).align_to(s2.align(cx).abi); - (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi) + let size = (field2_offset + s2.size(cx)).align_to( + self.scalar_align(cx) + // We absolutely must have an answer here or everything is FUBAR. + .unwrap(), + ); + Some(size) } - BackendRepr::Vector { element, count } => { - // No padding in vectors, except possibly for trailing padding - // to make the size a multiple of align (e.g. for vectors of size 3). - (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi) - } - BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None, - }) + // The size of a Vector can vary in surprising ways + BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None, + } } /// Discard validity range information and allow undef. @@ -1511,9 +1503,7 @@ impl BackendRepr { BackendRepr::Vector { element, count } => { BackendRepr::Vector { element: element.to_union(), count } } - BackendRepr::Uninhabited | BackendRepr::Memory { .. } => { - BackendRepr::Memory { sized: true } - } + BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true }, } } @@ -1709,6 +1699,11 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> { /// The leaf scalar with the largest number of invalid values /// (i.e. outside of its `valid_range`), if it exists. pub largest_niche: Option<Niche>, + /// Is this type known to be uninhabted? 
+ /// + /// This is separate from BackendRepr because uninhabited return types can affect ABI, + /// especially in the case of by-pointer struct returns, which allocate stack even when unused. + pub uninhabited: bool, pub align: AbiAndPrefAlign, pub size: Size, @@ -1733,21 +1728,21 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> { /// transmuted to `Foo<U>` we aim to create probalistically distinct seeds so that Foo can choose /// to reorder its fields based on that information. The current implementation is a conservative /// approximation of this goal. - pub randomization_seed: u64, + pub randomization_seed: Hash64, } impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { /// Returns `true` if this is an aggregate type (including a ScalarPair!) pub fn is_aggregate(&self) -> bool { match self.backend_repr { - BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false, + BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true, } } /// Returns `true` if this is an uninhabited type pub fn is_uninhabited(&self) -> bool { - self.backend_repr.is_uninhabited() + self.uninhabited } pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self { @@ -1783,11 +1778,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { fields: FieldsShape::Primitive, backend_repr: BackendRepr::Scalar(scalar), largest_niche, + uninhabited: false, size, align, max_repr_align: None, unadjusted_abi_align: align.abi, - randomization_seed, + randomization_seed: Hash64::new(randomization_seed), } } } @@ -1807,10 +1803,11 @@ where backend_repr, fields, largest_niche, + uninhabited, variants, max_repr_align, unadjusted_abi_align, - ref randomization_seed, + randomization_seed, } = self; f.debug_struct("Layout") .field("size", size) @@ -1818,6 +1815,7 @@ where .field("abi", backend_repr) .field("fields", fields) .field("largest_niche", largest_niche) + .field("uninhabited", uninhabited) .field("variants", variants) .field("max_repr_align", max_repr_align) .field("unadjusted_abi_align", unadjusted_abi_align) @@ -1882,7 +1880,6 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => { false } - BackendRepr::Uninhabited => self.size.bytes() == 0, BackendRepr::Memory { sized } => sized && self.size.bytes() == 0, } } |
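
Two of the lib.rs changes above are easiest to see with concrete numbers: `llvmlike_vector_align` falls back to the vector size rounded up to the next power of two when no psABI entry exists, and `Align::max_aligned_factor` is the largest power of two dividing an offset. A rough sketch of those two rules using bare `u64` byte counts rather than the crate's `Size`/`Align` types (function names and the psABI entry below are illustrative assumptions):

    fn llvmlike_vector_align_bytes(psabi_override: Option<u64>, vec_size: u64) -> u64 {
        // Honor a psABI-mandated vector alignment when one exists; otherwise
        // fall back to the size rounded up to a power of two, which resembles
        // LLVM's default choice.
        psabi_override.unwrap_or_else(|| vec_size.next_power_of_two())
    }

    fn max_aligned_factor(offset: u64) -> u64 {
        // Largest power of two dividing `offset`. The real Align can express
        // 2^64 for offset 0 ("all numbers are factors of 0"); a bare u64
        // cannot, so this sketch only accepts non-zero offsets.
        assert_ne!(offset, 0);
        1u64 << offset.trailing_zeros()
    }

    fn main() {
        assert_eq!(llvmlike_vector_align_bytes(None, 12), 16); // e.g. a 3 x f32 vector
        assert_eq!(llvmlike_vector_align_bytes(Some(16), 32), 16); // hypothetical psABI entry wins
        assert_eq!(max_aligned_factor(24), 8); // 24 = 8 * 3
        println!("ok");
    }
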

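The key idea of the extern_abi.rs rewrite earlier in this diff is that ordering, equality, and hashing are all derived from the ABI's canonical name rather than from variant order, so adding a variant no longer perturbs hashes or requires re-blessing tests. A reduced, self-contained sketch of that pattern (the `DemoAbi` type and everything in it are invented for illustration; the real table is generated by the `abi_impls!` macro):

    use std::cmp::Ordering;
    use std::hash::{Hash, Hasher};
    use std::str::FromStr;

    #[derive(Clone, Copy, Debug)]
    enum DemoAbi {
        C { unwind: bool },
        Rust,
    }

    impl DemoAbi {
        // Kept in the lexicographic-by-name order the real table guarantees.
        const ALL_VARIANTS: &'static [Self] =
            &[DemoAbi::C { unwind: false }, DemoAbi::C { unwind: true }, DemoAbi::Rust];

        const fn as_str(&self) -> &'static str {
            match self {
                DemoAbi::C { unwind: false } => "C",
                DemoAbi::C { unwind: true } => "C-unwind",
                DemoAbi::Rust => "Rust",
            }
        }
    }

    impl FromStr for DemoAbi {
        type Err = ();
        fn from_str(s: &str) -> Result<Self, ()> {
            // Reverse of as_str: scan the canonical table for a matching name.
            DemoAbi::ALL_VARIANTS.iter().copied().find(|abi| abi.as_str() == s).ok_or(())
        }
    }

    // All comparisons go through the canonical string, never the variant index.
    impl Ord for DemoAbi {
        fn cmp(&self, rhs: &Self) -> Ordering {
            self.as_str().cmp(rhs.as_str())
        }
    }
    impl PartialOrd for DemoAbi {
        fn partial_cmp(&self, rhs: &Self) -> Option<Ordering> {
            Some(self.cmp(rhs))
        }
    }
    impl PartialEq for DemoAbi {
        fn eq(&self, rhs: &Self) -> bool {
            self.cmp(rhs) == Ordering::Equal
        }
    }
    impl Eq for DemoAbi {}

    impl Hash for DemoAbi {
        fn hash<H: Hasher>(&self, state: &mut H) {
            // Hash the name plus a fixed trailing tag (the "prefix breaker")
            // so one name's hash input can never be a prefix of another's.
            self.as_str().hash(state);
            u32::from_be_bytes(*b"ABI\0").hash(state);
        }
    }

    fn main() {
        assert_eq!("C-unwind".parse::<DemoAbi>().unwrap().as_str(), "C-unwind");
        // The same property the new guarantee_lexicographic_ordering test checks.
        let mut sorted = DemoAbi::ALL_VARIANTS.to_vec();
        sorted.sort_unstable();
        assert_eq!(DemoAbi::ALL_VARIANTS, sorted.as_slice());
        println!("ok");
    }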