author     Jubilee Young <workingjubilee@gmail.com>  2025-09-28 14:40:39 -0700
committer  Jubilee Young <workingjubilee@gmail.com>  2025-09-28 15:02:14 -0700
commit     0c9d0dfe046f0674f0507df564504ac3bac862d9 (patch)
tree       567c070b8c49c1a7cb75fb3770f12ac4d645ff7e /compiler/rustc_codegen_llvm
parent     c8905eaa66e0c35a33626e974b9ce6955c739b5b (diff)
remove explicit deref of AbiAlign for most methods
Much of the compiler calls methods on the Align projected from an AbiAlign.
AbiAlign impls Deref to its inner Align, so the explicit .abi projections can
be simplified away. This also minimizes disruption for when AbiAlign is removed.

For now, preserve usages that might resolve to PartialOrd or PartialEq,
as those have odd inference.
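
Roughly, the simplification relies on AbiAlign's Deref impl. The sketch below
uses simplified stand-in definitions for Align and AbiAlign (not the actual
rustc_abi types) to show why method calls no longer need the explicit .abi
projection while comparisons still keep it:

// Sketch with simplified stand-ins for the rustc_abi types; field names and
// methods here are illustrative, not the real definitions.
use std::ops::Deref;

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
struct Align {
    pow2: u8,
}

impl Align {
    fn bytes(self) -> u64 {
        1 << self.pow2
    }
    fn bits(self) -> u64 {
        self.bytes() * 8
    }
}

#[derive(Clone, Copy)]
struct AbiAlign {
    abi: Align,
}

// Deref is what lets a method call on AbiAlign resolve to Align's methods.
impl Deref for AbiAlign {
    type Target = Align;
    fn deref(&self) -> &Align {
        &self.abi
    }
}

fn main() {
    let align = AbiAlign { abi: Align { pow2: 3 } };

    // Before: explicit projection through the abi field.
    let via_field = align.abi.bytes();
    // After: the call auto-derefs to Align::bytes, no .abi needed.
    let via_deref = align.bytes();
    assert_eq!(via_field, via_deref);
    assert_eq!(align.bits(), 64);

    // Comparisons keep the explicit projection: operators do not auto-deref,
    // so `align < other` would not type-check without PartialOrd on AbiAlign.
    let other = Align { pow2: 4 };
    assert!(align.abi < other);
}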
Diffstat (limited to 'compiler/rustc_codegen_llvm')
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs               2
-rw-r--r--  compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs  4
-rw-r--r--  compiler/rustc_codegen_llvm/src/intrinsic.rs                        2
-rw-r--r--  compiler/rustc_codegen_llvm/src/va_arg.rs                           4
4 files changed, 6 insertions, 6 deletions
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
index 4b74c04ed7a..1e4ace4ca92 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata.rs
@@ -1043,7 +1043,7 @@ fn create_member_type<'ll, 'tcx>(
             file_metadata,
             line_number,
             layout.size.bits(),
-            layout.align.abi.bits() as u32,
+            layout.align.bits() as u32,
             offset.bits(),
             flags,
             type_di_node,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
index 62d38d463ab..1ae6e6e5eec 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/metadata/enums/native.rs
@@ -289,7 +289,7 @@ fn build_enum_variant_part_di_node<'ll, 'tcx>(
                 file_metadata,
                 line_number,
                 enum_type_and_layout.size.bits(),
-                enum_type_and_layout.align.abi.bits() as u32,
+                enum_type_and_layout.align.bits() as u32,
                 DIFlags::FlagZero,
                 tag_member_di_node,
                 create_DIArray(DIB(cx), &[]),
@@ -449,7 +449,7 @@ fn build_enum_variant_member_di_node<'ll, 'tcx>(
             file_di_node,
             line_number,
             enum_type_and_layout.size.bits(),
-            enum_type_and_layout.align.abi.bits() as u32,
+            enum_type_and_layout.align.bits() as u32,
             Size::ZERO.bits(),
             discr,
             DIFlags::FlagZero,
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index 013108d1286..ba80352916b 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -297,7 +297,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 let align = if name == sym::unaligned_volatile_load {
                     1
                 } else {
-                    result.layout.align.abi.bytes() as u32
+                    result.layout.align.bytes() as u32
                 };
                 unsafe {
                     llvm::LLVMSetAlignment(load, align);
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index d48c7cf874a..b2c60db9c67 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -193,7 +193,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     // the offset again.
 
     bx.switch_to_block(maybe_reg);
-    if gr_type && layout.align.abi.bytes() > 8 {
+    if gr_type && layout.align.bytes() > 8 {
         reg_off_v = bx.add(reg_off_v, bx.const_i32(15));
         reg_off_v = bx.and(reg_off_v, bx.const_i32(-16));
     }
@@ -761,7 +761,7 @@ fn x86_64_sysv64_va_arg_from_memory<'ll, 'tcx>(
     // byte boundary if alignment needed by type exceeds 8 byte boundary.
     // It isn't stated explicitly in the standard, but in practice we use
     // alignment greater than 16 where necessary.
-    if layout.layout.align.abi.bytes() > 8 {
+    if layout.layout.align.bytes() > 8 {
         unreachable!("all instances of VaArgSafe have an alignment <= 8");
     }