author     Oli Scherer <github333195615777966@oli-obk.de>  2025-01-10 11:28:20 +0000
committer  Oli Scherer <github333195615777966@oli-obk.de>  2025-01-21 08:22:15 +0000
commit     8876cf7181556a0820e8ea6e40dad309a1063139
tree       6cbc8f207c135fb0540aa50ac58f3ff93e200955
parent     b605c65b6eb5fa71783f8e26df69975f9f1680ee
Also generate undef scalars and scalar pairs
 compiler/rustc_codegen_ssa/src/mir/operand.rs         | 59
 compiler/rustc_middle/src/mir/interpret/allocation.rs |  2
 tests/codegen/overaligned-constant.rs                 |  2
 3 files changed, 38 insertions(+), 25 deletions(-)
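For context: the point of this change is that a constant whose `Scalar` or `ScalarPair` bytes are entirely uninitialized can now be emitted as an `undef` immediate instead of being spilled to memory and loaded back. A hedged illustration of the kind of constant that benefits (the type and names here are hypothetical, not taken from this commit):

```rust
use std::mem::MaybeUninit;

// Hypothetical example: the second element of this pair is fully
// uninitialized. Assuming the tuple lowers to a `ScalarPair`, the backend
// can now emit `undef` for the uninit half instead of loading the whole
// pair back from a memory copy of the const allocation.
const PAIR: (u32, MaybeUninit<u32>) = (3, MaybeUninit::uninit());

fn main() {
    let (a, _b) = PAIR;
    assert_eq!(a, 3);
}
```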
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 19101ec2d1b..9ca7d4f8f00 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -204,14 +204,30 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let alloc_align = alloc.inner().align;
         assert!(alloc_align >= layout.align.abi);
 
+        // Returns `None` when the value is partially undefined or any byte of it has provenance.
+        // Otherwise returns the value, or an `undef` if the entire value is uninitialized.
         let read_scalar = |start, size, s: abi::Scalar, ty| {
+            let range = alloc_range(start, size);
             match alloc.0.read_scalar(
                 bx,
-                alloc_range(start, size),
+                range,
                 /*read_provenance*/ matches!(s.primitive(), abi::Primitive::Pointer(_)),
             ) {
-                Ok(val) => bx.scalar_to_backend(val, s, ty),
-                Err(_) => bx.const_poison(ty),
+                Ok(val) => Some(bx.scalar_to_backend(val, s, ty)),
+                Err(_) => {
+                    // We may have failed due to partial provenance or unexpected provenance;
+                    // if so, continue down the normal code path.
+                    if alloc.0.provenance().range_empty(range, &bx.tcx())
+                        // `read_scalar` failed even though the range has no provenance, so the
+                        // bytes must be partially or fully uninitialized. Thus we can unwrap the
+                        // uninit range from the init mask and check whether it covers the full range.
+                        && alloc.0.init_mask().is_range_initialized(range).unwrap_err() == range
+                    {
+                        Some(bx.const_undef(ty))
+                    } else {
+                        None
+                    }
+                }
             }
         };
 
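The closure above makes a three-way decision: a successful read yields the value; a failed read over a range with no provenance that is uninitialized from end to end can safely become `undef`; everything else (partial init, or provenance where none was expected) returns `None` and falls back to a memory load. A minimal standalone sketch of that split, with simplified types instead of the compiler's (and ignoring that pointer scalars legitimately read provenance):

```rust
/// Stand-in for the three outcomes of the new `read_scalar` closure;
/// `init` marks which bytes of the requested range are initialized.
#[derive(Debug, PartialEq)]
enum ScalarRead {
    Value,    // fully initialized: build the immediate from the bytes
    Undef,    // fully uninitialized, no provenance: emit `undef`
    Fallback, // partially initialized or stray provenance: load from memory
}

fn classify(init: &[bool], has_provenance: bool) -> ScalarRead {
    if has_provenance {
        ScalarRead::Fallback
    } else if init.iter().all(|&b| b) {
        ScalarRead::Value
    } else if init.iter().all(|&b| !b) {
        ScalarRead::Undef
    } else {
        ScalarRead::Fallback
    }
}

fn main() {
    assert_eq!(classify(&[true; 4], false), ScalarRead::Value);
    assert_eq!(classify(&[false; 4], false), ScalarRead::Undef);
    assert_eq!(classify(&[true, true, false, false], false), ScalarRead::Fallback);
}
```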
@@ -222,16 +238,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
         match layout.backend_repr {
-            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
+            BackendRepr::Scalar(s) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
-                OperandRef { val: OperandValue::Immediate(val), layout }
+                if let Some(val) = read_scalar(offset, size, s, bx.immediate_backend_type(layout)) {
+                    return OperandRef { val: OperandValue::Immediate(val), layout };
+                }
             }
-            BackendRepr::ScalarPair(
-                a @ abi::Scalar::Initialized { .. },
-                b @ abi::Scalar::Initialized { .. },
-            ) => {
+            BackendRepr::ScalarPair(a, b) => {
                 let (a_size, b_size) = (a.size(bx), b.size(bx));
                 let b_offset = (offset + a_size).align_to(b.align(bx).abi);
                 assert!(b_offset.bytes() > 0);
@@ -247,20 +261,21 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                     b,
                     bx.scalar_pair_element_backend_type(layout, 1, true),
                 );
-                OperandRef { val: OperandValue::Pair(a_val, b_val), layout }
-            }
-            _ if layout.is_zst() => OperandRef::zero_sized(layout),
-            _ => {
-                // Neither a scalar nor scalar pair. Load from a place
-                // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
-                // same `ConstAllocation`?
-                let init = bx.const_data_from_alloc(alloc);
-                let base_addr = bx.static_addr_of(init, alloc_align, None);
-
-                let llval = bx.const_ptr_byte_offset(base_addr, offset);
-                bx.load_operand(PlaceRef::new_sized(llval, layout))
+                if let (Some(a_val), Some(b_val)) = (a_val, b_val) {
+                    return OperandRef { val: OperandValue::Pair(a_val, b_val), layout };
+                }
             }
+            _ if layout.is_zst() => return OperandRef::zero_sized(layout),
+            _ => {}
         }
+        // Neither a scalar nor a scalar pair, or reading the scalar(s) failed. Load from a place.
+        // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the
+        // same `ConstAllocation`?
+        let init = bx.const_data_from_alloc(alloc);
+        let base_addr = bx.static_addr_of(init, alloc_align, None);
+
+        let llval = bx.const_ptr_byte_offset(base_addr, offset);
+        bx.load_operand(PlaceRef::new_sized(llval, layout))
     }
 
     /// Asserts that this operand refers to a scalar and returns
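Design note: rather than duplicating the memory-load fallback inside each match arm, the rewrite early-returns on success and lets every failure path drop through to a single slow path after the `match`. A schematic of that control-flow shape (hypothetical enum, not the real compiler types):

```rust
enum Repr {
    Scalar(Option<u32>),
    Pair(Option<u32>, Option<u32>),
    Other,
}

fn lower(repr: Repr) -> &'static str {
    match repr {
        // Success paths return immediately.
        Repr::Scalar(Some(_)) => return "immediate",
        Repr::Pair(Some(_), Some(_)) => return "pair",
        // `None` reads and non-scalar layouts fall through.
        _ => {}
    }
    // Single shared fallback: materialize the constant in memory and load it.
    "load-from-place"
}

fn main() {
    assert_eq!(lower(Repr::Scalar(Some(1))), "immediate");
    assert_eq!(lower(Repr::Pair(Some(1), None)), "load-from-place");
    assert_eq!(lower(Repr::Other), "load-from-place");
}
```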
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
index d6f8fed755f..1b07846e0cf 100644
--- a/compiler/rustc_middle/src/mir/interpret/allocation.rs
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -222,7 +222,7 @@ impl AllocError {
 }
 
 /// The information that makes up a memory access: offset and size.
-#[derive(Copy, Clone)]
+#[derive(Copy, Clone, PartialEq)]
 pub struct AllocRange {
     pub start: Size,
     pub size: Size,
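The `PartialEq` derive exists so the codegen change above can compare the uninit range reported by the init mask against the requested range (`unwrap_err() == range`). A minimal sketch of that comparison, assuming a simplified `AllocRange` with plain integers in place of `Size`:

```rust
// Simplified AllocRange: the real type wraps `Size` values, but the
// byte-offset equality this derive enables is the same.
#[derive(Copy, Clone, PartialEq, Debug)]
struct AllocRange {
    start: u64, // byte offset into the allocation
    size: u64,  // length of the access in bytes
}

fn main() {
    let requested = AllocRange { start: 0, size: 8 };

    // The init mask reports the whole range as uninitialized:
    // emitting `undef` for the scalar is sound.
    let uninit = AllocRange { start: 0, size: 8 };
    assert_eq!(uninit, requested);

    // Only part of the range is uninitialized: fall back to a memory load.
    let partial = AllocRange { start: 4, size: 4 };
    assert_ne!(partial, requested);
}
```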
diff --git a/tests/codegen/overaligned-constant.rs b/tests/codegen/overaligned-constant.rs
index 7cd8d19c211..e5540aca387 100644
--- a/tests/codegen/overaligned-constant.rs
+++ b/tests/codegen/overaligned-constant.rs
@@ -17,8 +17,6 @@ pub fn overaligned_constant() {
     // CHECK-LABEL: @overaligned_constant
     // CHECK: [[full:%_.*]] = alloca [32 x i8], align 8
     // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[full]], ptr align 8 @0, i64 32, i1 false)
-    // CHECK: %b.0 = load i32, ptr @0, align 4
-    // CHECK: %b.1 = load i32, ptr getelementptr inbounds ({{.*}}), align 4
     let mut s = S(1);
 
     s.0 = 3;
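(The CHECK lines removed above matched two runtime `load i32` instructions; with the scalar pair now built directly as immediates, possibly with `undef` halves, those loads are no longer emitted.)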