| author | Jubilee Young <workingjubilee@gmail.com> | 2025-09-27 20:00:54 -0700 |
|---|---|---|
| committer | Jubilee Young <workingjubilee@gmail.com> | 2025-09-27 22:13:53 -0700 |
| commit | b3f3e36c72952736be5b9e0360ee5b86148f2c29 (patch) | |
| tree | 42ba96ca2f49a181d491ffe9e75ea8e9d5c02a0d | |
| parent | 4082d6a3f0347c2fc4b8c8d5a6a38ed7248fa161 (diff) | |
| download | rust-b3f3e36c72952736be5b9e0360ee5b86148f2c29.tar.gz rust-b3f3e36c72952736be5b9e0360ee5b86148f2c29.zip | |
compiler: remove AbiAlign inside TargetDataLayout
This maintains AbiAlign usage in the public API and most of the compiler, but direct access to these fields is now in terms of Align only.
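The pattern repeated throughout the diff: TargetDataLayout fields such as i8_align change from AbiAlign to plain Align, internal call sites drop the .abi projection, and public accessors like pointer_align() re-wrap the stored value with AbiAlign::new. Below is a minimal, self-contained sketch of that boundary; the Align and AbiAlign stand-ins are simplified and do not reflect the real rustc_abi definitions, which carry more invariants and methods.

```rust
/// Simplified stand-in for rustc_abi's Align: stores log2 of the byte alignment.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Align {
    pow2: u8, // alignment in bytes is 2^pow2
}

impl Align {
    fn from_bits(bits: u64) -> Align {
        // Assumes `bits` is a power of two; the real constructor validates and returns a Result.
        Align { pow2: (bits / 8).trailing_zeros() as u8 }
    }
    fn bytes(self) -> u64 {
        1 << self.pow2
    }
}

/// Wrapper kept only at the public API boundary after this change.
#[derive(Clone, Copy, Debug)]
struct AbiAlign {
    abi: Align,
}

impl AbiAlign {
    fn new(abi: Align) -> AbiAlign {
        AbiAlign { abi }
    }
}

/// After the change: the data layout stores plain `Align` values directly.
struct TargetDataLayout {
    i8_align: Align,
    pointer_align: Align,
}

impl TargetDataLayout {
    /// Public accessor still exposes `AbiAlign`, re-wrapping the plain field,
    /// mirroring the shape of `pointer_align()` in the diff.
    fn pointer_align(&self) -> AbiAlign {
        AbiAlign::new(self.pointer_align)
    }
}

fn main() {
    let dl = TargetDataLayout {
        i8_align: Align::from_bits(8),
        pointer_align: Align::from_bits(64),
    };
    // Internal access is now in terms of Align only: no `.abi` projection needed.
    assert_eq!(dl.i8_align.bytes(), 1);
    // The public API keeps the AbiAlign return type.
    assert_eq!(dl.pointer_align().abi.bytes(), 8);
}
```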
| -rw-r--r-- | compiler/rustc_abi/src/callconv/reg.rs | 22 |
| -rw-r--r-- | compiler/rustc_abi/src/layout.rs | 65 |
| -rw-r--r-- | compiler/rustc_abi/src/layout/simple.rs | 27 |
| -rw-r--r-- | compiler/rustc_abi/src/lib.rs | 85 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/consts.rs | 4 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/intrinsic.rs | 2 |
| -rw-r--r-- | compiler/rustc_codegen_llvm/src/va_arg.rs | 8 |
| -rw-r--r-- | compiler/rustc_session/src/config/cfg.rs | 10 |
| -rw-r--r-- | compiler/rustc_target/src/callconv/mips.rs | 2 |
| -rw-r--r-- | compiler/rustc_target/src/callconv/mips64.rs | 4 |
| -rw-r--r-- | compiler/rustc_target/src/callconv/mod.rs | 2 |
| -rw-r--r-- | compiler/rustc_target/src/callconv/sparc.rs | 2 |
| -rw-r--r-- | compiler/rustc_target/src/callconv/sparc64.rs | 2 |
13 files changed, 115 insertions, 120 deletions
diff --git a/compiler/rustc_abi/src/callconv/reg.rs b/compiler/rustc_abi/src/callconv/reg.rs index 8cf140dbaad..66c8056d0c2 100644 --- a/compiler/rustc_abi/src/callconv/reg.rs +++ b/compiler/rustc_abi/src/callconv/reg.rs @@ -42,22 +42,22 @@ impl Reg { let dl = cx.data_layout(); match self.kind { RegKind::Integer => match self.size.bits() { - 1 => dl.i1_align.abi, - 2..=8 => dl.i8_align.abi, - 9..=16 => dl.i16_align.abi, - 17..=32 => dl.i32_align.abi, - 33..=64 => dl.i64_align.abi, - 65..=128 => dl.i128_align.abi, + 1 => dl.i1_align, + 2..=8 => dl.i8_align, + 9..=16 => dl.i16_align, + 17..=32 => dl.i32_align, + 33..=64 => dl.i64_align, + 65..=128 => dl.i128_align, _ => panic!("unsupported integer: {self:?}"), }, RegKind::Float => match self.size.bits() { - 16 => dl.f16_align.abi, - 32 => dl.f32_align.abi, - 64 => dl.f64_align.abi, - 128 => dl.f128_align.abi, + 16 => dl.f16_align, + 32 => dl.f32_align, + 64 => dl.f64_align, + 128 => dl.f128_align, _ => panic!("unsupported float: {self:?}"), }, - RegKind::Vector => dl.llvmlike_vector_align(self.size).abi, + RegKind::Vector => dl.llvmlike_vector_align(self.size), } } } diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 5004d0c8022..09b4322e299 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -174,11 +174,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { // Non-power-of-two vectors have padding up to the next power-of-two. // If we're a packed repr, remove the padding while keeping the alignment as close // to a vector as possible. - (BackendRepr::Memory { sized: true }, AbiAlign { abi: Align::max_aligned_factor(size) }) + (BackendRepr::Memory { sized: true }, Align::max_aligned_factor(size)) } else { (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size)) }; - let size = size.align_to(align.abi); + let size = size.align_to(align); Ok(LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, @@ -190,7 +190,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { largest_niche: elt.largest_niche, uninhabited: false, size, - align, + align: AbiAlign::new(align), max_repr_align: None, unadjusted_abi_align: elt.align.abi, randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)), @@ -388,7 +388,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { return Err(LayoutCalculatorError::UnexpectedUnsized(*field)); } - align = align.max(field.align); + align = align.max(field.align.abi); max_repr_align = max_repr_align.max(field.max_repr_align); size = cmp::max(size, field.size); @@ -423,13 +423,13 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { } if let Some(pack) = repr.pack { - align = align.min(AbiAlign::new(pack)); + align = align.min(pack); } // The unadjusted ABI alignment does not include repr(align), but does include repr(pack). // See documentation on `LayoutData::unadjusted_abi_align`. - let unadjusted_abi_align = align.abi; + let unadjusted_abi_align = align; if let Some(repr_align) = repr.align { - align = align.max(AbiAlign::new(repr_align)); + align = align.max(repr_align); } // `align` must not be modified after this, or `unadjusted_abi_align` could be inaccurate. let align = align; @@ -441,14 +441,12 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { Ok(Some((repr, _))) => match repr { // Mismatched alignment (e.g. 
union is #[repr(packed)]): disable opt BackendRepr::Scalar(_) | BackendRepr::ScalarPair(_, _) - if repr.scalar_align(dl).unwrap() != align.abi => + if repr.scalar_align(dl).unwrap() != align => { BackendRepr::Memory { sized: true } } // Vectors require at least element alignment, else disable the opt - BackendRepr::SimdVector { element, count: _ } - if element.align(dl).abi > align.abi => - { + BackendRepr::SimdVector { element, count: _ } if element.align(dl).abi > align => { BackendRepr::Memory { sized: true } } // the alignment tests passed and we can use this @@ -474,8 +472,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { backend_repr, largest_niche: None, uninhabited: false, - align, - size: size.align_to(align.abi), + align: AbiAlign::new(align), + size: size.align_to(align), max_repr_align, unadjusted_abi_align, randomization_seed: combined_seed, @@ -611,7 +609,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let mut align = dl.aggregate_align; let mut max_repr_align = repr.align; - let mut unadjusted_abi_align = align.abi; + let mut unadjusted_abi_align = align; let mut variant_layouts = variants .iter_enumerated() @@ -619,7 +617,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let mut st = self.univariant(v, repr, StructKind::AlwaysSized).ok()?; st.variants = Variants::Single { index: j }; - align = align.max(st.align); + align = align.max(st.align.abi); max_repr_align = max_repr_align.max(st.max_repr_align); unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align); @@ -646,7 +644,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let (niche_start, niche_scalar) = niche.reserve(dl, count)?; let niche_offset = niche.offset; let niche_size = niche.value.size(dl); - let size = variant_layouts[largest_variant_index].size.align_to(align.abi); + let size = variant_layouts[largest_variant_index].size.align_to(align); let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| { if i == largest_variant_index { @@ -699,7 +697,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { .iter_enumerated() .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO); let same_size = size == variant_layouts[largest_variant_index].size; - let same_align = align == variant_layouts[largest_variant_index].align; + let same_align = align == variant_layouts[largest_variant_index].align.abi; let uninhabited = variant_layouts.iter().all(|v| v.is_uninhabited()); let abi = if same_size && same_align && others_zst { @@ -746,7 +744,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { largest_niche, uninhabited, size, - align, + align: AbiAlign::new(align), max_repr_align, unadjusted_abi_align, randomization_seed: combined_seed, @@ -818,7 +816,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let mut align = dl.aggregate_align; let mut max_repr_align = repr.align; - let mut unadjusted_abi_align = align.abi; + let mut unadjusted_abi_align = align; let mut size = Size::ZERO; @@ -860,7 +858,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { } } size = cmp::max(size, st.size); - align = align.max(st.align); + align = align.max(st.align.abi); max_repr_align = max_repr_align.max(st.max_repr_align); unadjusted_abi_align = unadjusted_abi_align.max(st.unadjusted_abi_align); Ok(st) @@ -868,7 +866,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { .collect::<Result<IndexVec<VariantIdx, _>, _>>()?; // Align the maximum variant size to the largest alignment. 
- size = size.align_to(align.abi); + size = size.align_to(align); // FIXME(oli-obk): deduplicate and harden these checks if size.bytes() >= dl.obj_size_bound() { @@ -1042,7 +1040,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { }; if pair_offsets[FieldIdx::new(0)] == Size::ZERO && pair_offsets[FieldIdx::new(1)] == *offset - && align == pair.align + && align == pair.align.abi && size == pair.size { // We can use `ScalarPair` only when it matches our @@ -1066,7 +1064,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { // Also need to bump up the size and alignment, so that the entire value fits // in here. variant.size = cmp::max(variant.size, size); - variant.align.abi = cmp::max(variant.align.abi, align.abi); + variant.align.abi = cmp::max(variant.align.abi, align); } } } @@ -1092,7 +1090,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { largest_niche, uninhabited, backend_repr: abi, - align, + align: AbiAlign::new(align), size, max_repr_align, unadjusted_abi_align, @@ -1288,7 +1286,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { if let StructKind::Prefixed(prefix_size, prefix_align) = kind { let prefix_align = if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align }; - align = align.max(AbiAlign::new(prefix_align)); + align = align.max(prefix_align); offset = prefix_size.align_to(prefix_align); } for &i in &inverse_memory_index { @@ -1312,7 +1310,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { field.align }; offset = offset.align_to(field_align.abi); - align = align.max(field_align); + align = align.max(field_align.abi); max_repr_align = max_repr_align.max(field.max_repr_align); debug!("univariant offset: {:?} field: {:#?}", offset, field); @@ -1339,9 +1337,9 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { // The unadjusted ABI alignment does not include repr(align), but does include repr(pack). // See documentation on `LayoutData::unadjusted_abi_align`. - let unadjusted_abi_align = align.abi; + let unadjusted_abi_align = align; if let Some(repr_align) = repr.align { - align = align.max(AbiAlign::new(repr_align)); + align = align.max(repr_align); } // `align` must not be modified after this point, or `unadjusted_abi_align` could be inaccurate. let align = align; @@ -1360,7 +1358,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices())); inverse_memory_index.into_iter().map(|it| it.index() as u32).collect() }; - let size = min_size.align_to(align.abi); + let size = min_size.align_to(align); // FIXME(oli-obk): deduplicate and harden these checks if size.bytes() >= dl.obj_size_bound() { return Err(LayoutCalculatorError::SizeOverflow); @@ -1383,8 +1381,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { layout_of_single_non_zst_field = Some(field); // Field fills the struct and it has a scalar or scalar pair ABI. - if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size - { + if offsets[i].bytes() == 0 && align == field.align.abi && size == field.size { match field.backend_repr { // For plain scalars, or vectors of them, we can't unpack // newtypes for `#[repr(C)]`, as that affects C ABIs. 
@@ -1428,7 +1425,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { }; if offsets[i] == pair_offsets[FieldIdx::new(0)] && offsets[j] == pair_offsets[FieldIdx::new(1)] - && align == pair.align + && align == pair.align.abi && size == pair.size { // We can use `ScalarPair` only when it matches our @@ -1450,7 +1447,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { Some(l) => l.unadjusted_abi_align, None => { // `repr(transparent)` with all ZST fields. - align.abi + align } } } else { @@ -1465,7 +1462,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { backend_repr: abi, largest_niche, uninhabited, - align, + align: AbiAlign::new(align), size, max_repr_align, unadjusted_abi_align, diff --git a/compiler/rustc_abi/src/layout/simple.rs b/compiler/rustc_abi/src/layout/simple.rs index 0d0706defc2..b3807c87273 100644 --- a/compiler/rustc_abi/src/layout/simple.rs +++ b/compiler/rustc_abi/src/layout/simple.rs @@ -4,7 +4,8 @@ use rustc_hashes::Hash64; use rustc_index::{Idx, IndexVec}; use crate::{ - BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants, + AbiAlign, BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, + Variants, }; /// "Simple" layout constructors that cannot fail. @@ -20,10 +21,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { backend_repr: BackendRepr::Memory { sized }, largest_niche: None, uninhabited: false, - align: dl.i8_align, + align: AbiAlign::new(dl.i8_align), size: Size::ZERO, max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, + unadjusted_abi_align: dl.i8_align, randomization_seed: Hash64::new(0), } } @@ -37,10 +38,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { backend_repr: BackendRepr::Memory { sized: true }, largest_niche: None, uninhabited: true, - align: dl.i8_align, + align: AbiAlign::new(dl.i8_align), size: Size::ZERO, max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, + unadjusted_abi_align: dl.i8_align, randomization_seed: Hash64::ZERO, } } @@ -89,10 +90,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { pub fn scalar_pair<C: HasDataLayout>(cx: &C, a: Scalar, b: Scalar) -> Self { let dl = cx.data_layout(); - let b_align = b.align(dl); - let align = a.align(dl).max(b_align).max(dl.aggregate_align); - let b_offset = a.size(dl).align_to(b_align.abi); - let size = (b_offset + b.size(dl)).align_to(align.abi); + let b_align = b.align(dl).abi; + let align = a.align(dl).abi.max(b_align).max(dl.aggregate_align); + let b_offset = a.size(dl).align_to(b_align); + let size = (b_offset + b.size(dl)).align_to(align); // HACK(nox): We iter on `b` and then `a` because `max_by_key` // returns the last maximum. 
@@ -112,10 +113,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { backend_repr: BackendRepr::ScalarPair(a, b), largest_niche, uninhabited: false, - align, + align: AbiAlign::new(align), size, max_repr_align: None, - unadjusted_abi_align: align.abi, + unadjusted_abi_align: align, randomization_seed: Hash64::new(combined_seed), } } @@ -138,10 +139,10 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { backend_repr: BackendRepr::Memory { sized: true }, largest_niche: None, uninhabited: true, - align: dl.i8_align, + align: AbiAlign::new(dl.i8_align), size: Size::ZERO, max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, + unadjusted_abi_align: dl.i8_align, randomization_seed: Hash64::ZERO, } } diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 369874521e5..fe7148d1d57 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -229,7 +229,7 @@ pub struct PointerSpec { /// The size of the bitwise representation of the pointer. pointer_size: Size, /// The alignment of pointers for this address space - pointer_align: AbiAlign, + pointer_align: Align, /// The size of the value a pointer can be offset by in this address space. pointer_offset: Size, /// Pointers into this address space contain extra metadata @@ -242,20 +242,20 @@ pub struct PointerSpec { #[derive(Debug, PartialEq, Eq)] pub struct TargetDataLayout { pub endian: Endian, - pub i1_align: AbiAlign, - pub i8_align: AbiAlign, - pub i16_align: AbiAlign, - pub i32_align: AbiAlign, - pub i64_align: AbiAlign, - pub i128_align: AbiAlign, - pub f16_align: AbiAlign, - pub f32_align: AbiAlign, - pub f64_align: AbiAlign, - pub f128_align: AbiAlign, - pub aggregate_align: AbiAlign, + pub i1_align: Align, + pub i8_align: Align, + pub i16_align: Align, + pub i32_align: Align, + pub i64_align: Align, + pub i128_align: Align, + pub f16_align: Align, + pub f32_align: Align, + pub f64_align: Align, + pub f128_align: Align, + pub aggregate_align: Align, /// Alignments for vector types. 
- pub vector_align: Vec<(Size, AbiAlign)>, + pub vector_align: Vec<(Size, Align)>, pub default_address_space: AddressSpace, pub default_address_space_pointer_spec: PointerSpec, @@ -282,25 +282,25 @@ impl Default for TargetDataLayout { let align = |bits| Align::from_bits(bits).unwrap(); TargetDataLayout { endian: Endian::Big, - i1_align: AbiAlign::new(align(8)), - i8_align: AbiAlign::new(align(8)), - i16_align: AbiAlign::new(align(16)), - i32_align: AbiAlign::new(align(32)), - i64_align: AbiAlign::new(align(32)), - i128_align: AbiAlign::new(align(32)), - f16_align: AbiAlign::new(align(16)), - f32_align: AbiAlign::new(align(32)), - f64_align: AbiAlign::new(align(64)), - f128_align: AbiAlign::new(align(128)), - aggregate_align: AbiAlign { abi: align(8) }, + i1_align: align(8), + i8_align: align(8), + i16_align: align(16), + i32_align: align(32), + i64_align: align(32), + i128_align: align(32), + f16_align: align(16), + f32_align: align(32), + f64_align: align(64), + f128_align: align(128), + aggregate_align: align(8), vector_align: vec![ - (Size::from_bits(64), AbiAlign::new(align(64))), - (Size::from_bits(128), AbiAlign::new(align(128))), + (Size::from_bits(64), align(64)), + (Size::from_bits(128), align(128)), ], default_address_space: AddressSpace::ZERO, default_address_space_pointer_spec: PointerSpec { pointer_size: Size::from_bits(64), - pointer_align: AbiAlign::new(align(64)), + pointer_align: align(64), pointer_offset: Size::from_bits(64), _is_fat: false, }, @@ -360,7 +360,7 @@ impl TargetDataLayout { .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err }) }; let abi = parse_bits(s, "alignment", cause)?; - Ok(AbiAlign::new(align_from_bits(abi)?)) + Ok(align_from_bits(abi)?) }; // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`, @@ -596,7 +596,7 @@ impl TargetDataLayout { /// psABI-mandated alignment for a vector type, if any #[inline] - fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAlign> { + fn cabi_vector_align(&self, vec_size: Size) -> Option<Align> { self.vector_align .iter() .find(|(size, _align)| *size == vec_size) @@ -605,10 +605,9 @@ impl TargetDataLayout { /// an alignment resembling the one LLVM would pick for a vector #[inline] - pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAlign { - self.cabi_vector_align(vec_size).unwrap_or(AbiAlign::new( - Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(), - )) + pub fn llvmlike_vector_align(&self, vec_size: Size) -> Align { + self.cabi_vector_align(vec_size) + .unwrap_or(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap()) } /// Get the pointer size in the default data address space. @@ -654,21 +653,19 @@ impl TargetDataLayout { /// Get the pointer alignment in the default data address space. #[inline] pub fn pointer_align(&self) -> AbiAlign { - self.default_address_space_pointer_spec.pointer_align + AbiAlign::new(self.default_address_space_pointer_spec.pointer_align) } /// Get the pointer alignment in a specific address space. 
#[inline] pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign { - if c == self.default_address_space { - return self.default_address_space_pointer_spec.pointer_align; - } - - if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) { + AbiAlign::new(if c == self.default_address_space { + self.default_address_space_pointer_spec.pointer_align + } else if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) { e.1.pointer_align } else { panic!("Use of unknown address space {c:?}"); - } + }) } } @@ -1185,13 +1182,13 @@ impl Integer { use Integer::*; let dl = cx.data_layout(); - match self { + AbiAlign::new(match self { I8 => dl.i8_align, I16 => dl.i16_align, I32 => dl.i32_align, I64 => dl.i64_align, I128 => dl.i128_align, - } + }) } /// Returns the largest signed value that can be represented by this Integer. @@ -1311,12 +1308,12 @@ impl Float { use Float::*; let dl = cx.data_layout(); - match self { + AbiAlign::new(match self { F16 => dl.f16_align, F32 => dl.f32_align, F64 => dl.f64_align, F128 => dl.f128_align, - } + }) } } diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index a110ecbb75d..40375ef6510 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -564,7 +564,7 @@ impl<'ll> CodegenCx<'ll, '_> { let g = self.define_global(&sym, llty).unwrap_or_else(|| { bug!("symbol `{}` is already defined", sym); }); - set_global_alignment(self, g, self.tcx.data_layout.i8_align.abi); + set_global_alignment(self, g, self.tcx.data_layout.i8_align); llvm::set_initializer(g, llval); llvm::set_linkage(g, llvm::Linkage::PrivateLinkage); llvm::set_section(g, c"__TEXT,__cstring,cstring_literals"); @@ -680,7 +680,7 @@ impl<'ll> CodegenCx<'ll, '_> { let methname_g = self.define_global(&methname_sym, methname_llty).unwrap_or_else(|| { bug!("symbol `{}` is already defined", methname_sym); }); - set_global_alignment(self, methname_g, self.tcx.data_layout.i8_align.abi); + set_global_alignment(self, methname_g, self.tcx.data_layout.i8_align); llvm::set_initializer(methname_g, methname_llval); llvm::set_linkage(methname_g, llvm::Linkage::PrivateLinkage); llvm::set_section( diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 50398a32142..94a74e27bbc 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -1047,7 +1047,7 @@ fn codegen_emcc_try<'ll, 'tcx>( // create an alloca and pass a pointer to that. let ptr_size = bx.tcx().data_layout.pointer_size(); let ptr_align = bx.tcx().data_layout.pointer_align().abi; - let i8_align = bx.tcx().data_layout.i8_align.abi; + let i8_align = bx.tcx().data_layout.i8_align; // Required in order for there to be no padding between the fields. assert!(i8_align <= ptr_align); let catch_data = bx.alloca(2 * ptr_size, ptr_align); diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index ab08125217f..4d6fc3e19c5 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -291,7 +291,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr }; - let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi); + let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align); // "Align" the register count when the type is passed as `i64`. 
if is_i64 || (is_f64 && is_soft_float_abi) { @@ -329,7 +329,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( // Increase the used-register count. let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 }; let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr)); - bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi); + bx.store(new_num_regs, num_regs_addr, dl.i8_align); bx.br(end); @@ -339,7 +339,7 @@ fn emit_powerpc_va_arg<'ll, 'tcx>( let mem_addr = { bx.switch_to_block(in_mem); - bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi); + bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align); // Everything in the overflow area is rounded up to a size of at least 4. let overflow_area_align = Align::from_bytes(4).unwrap(); @@ -813,7 +813,7 @@ fn emit_xtensa_va_arg<'ll, 'tcx>( let va_ndx_offset = va_reg_offset + 4; let offset_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(va_ndx_offset)); - let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align.abi); + let offset = bx.load(bx.type_i32(), offset_ptr, bx.tcx().data_layout.i32_align); let offset = round_up_to_alignment(bx, offset, layout.align.abi); let slot_size = layout.size.align_to(Align::from_bytes(4).unwrap()).bytes() as i32; diff --git a/compiler/rustc_session/src/config/cfg.rs b/compiler/rustc_session/src/config/cfg.rs index f3d91ce4a5d..a72f6201dce 100644 --- a/compiler/rustc_session/src/config/cfg.rs +++ b/compiler/rustc_session/src/config/cfg.rs @@ -259,11 +259,11 @@ pub(crate) fn default_configuration(sess: &Session) -> Cfg { }); let mut has_atomic = false; for (i, align) in [ - (8, layout.i8_align.abi), - (16, layout.i16_align.abi), - (32, layout.i32_align.abi), - (64, layout.i64_align.abi), - (128, layout.i128_align.abi), + (8, layout.i8_align), + (16, layout.i16_align), + (32, layout.i32_align), + (64, layout.i64_align), + (128, layout.i128_align), ] { if i >= sess.target.min_atomic_width() && i <= sess.target.max_atomic_width() { if !has_atomic { diff --git a/compiler/rustc_target/src/callconv/mips.rs b/compiler/rustc_target/src/callconv/mips.rs index 48a01da865b..8ffd7bd1778 100644 --- a/compiler/rustc_target/src/callconv/mips.rs +++ b/compiler/rustc_target/src/callconv/mips.rs @@ -24,7 +24,7 @@ where } let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; + let align = arg.layout.align.abi.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { let pad_i32 = !offset.is_aligned(align); diff --git a/compiler/rustc_target/src/callconv/mips64.rs b/compiler/rustc_target/src/callconv/mips64.rs index 0209838bec1..8386a15933c 100644 --- a/compiler/rustc_target/src/callconv/mips64.rs +++ b/compiler/rustc_target/src/callconv/mips64.rs @@ -110,9 +110,9 @@ where // We only care about aligned doubles if let BackendRepr::Scalar(scalar) = field.backend_repr { if scalar.primitive() == Primitive::Float(Float::F64) { - if offset.is_aligned(dl.f64_align.abi) { + if offset.is_aligned(dl.f64_align) { // Insert enough integers to cover [last_offset, offset) - assert!(last_offset.is_aligned(dl.f64_align.abi)); + assert!(last_offset.is_aligned(dl.f64_align)); for _ in 0..((offset - last_offset).bits() / 64) .min((prefix.len() - prefix_index) as u64) { diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 7a7c63c475b..c59af581a1f 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -332,7 +332,7 @@ impl 
CastTarget { self.prefix .iter() .filter_map(|x| x.map(|reg| reg.align(cx))) - .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)), |acc, align| { + .fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), |acc, align| { acc.max(align) }) } diff --git a/compiler/rustc_target/src/callconv/sparc.rs b/compiler/rustc_target/src/callconv/sparc.rs index 48a01da865b..8ffd7bd1778 100644 --- a/compiler/rustc_target/src/callconv/sparc.rs +++ b/compiler/rustc_target/src/callconv/sparc.rs @@ -24,7 +24,7 @@ where } let dl = cx.data_layout(); let size = arg.layout.size; - let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi; + let align = arg.layout.align.abi.max(dl.i32_align).min(dl.i64_align); if arg.layout.is_aggregate() { let pad_i32 = !offset.is_aligned(align); diff --git a/compiler/rustc_target/src/callconv/sparc64.rs b/compiler/rustc_target/src/callconv/sparc64.rs index ecc9067ced3..62c8ed1dc21 100644 --- a/compiler/rustc_target/src/callconv/sparc64.rs +++ b/compiler/rustc_target/src/callconv/sparc64.rs @@ -29,7 +29,7 @@ where data.has_float = true; - if !data.last_offset.is_aligned(dl.f64_align.abi) && data.last_offset < offset { + if !data.last_offset.is_aligned(dl.f64_align) && data.last_offset < offset { if data.prefix_index == data.prefix.len() { return data; } |
