author    bors <bors@rust-lang.org>  2025-08-17 04:33:31 +0000
committer bors <bors@rust-lang.org>  2025-08-17 04:33:31 +0000
commit    99ba5565678a51c2488322a5e75d5b59e323b498 (patch)
tree      09d5c634a19e6288a856e35b14f010dd51bf1e3b /compiler/rustc_const_eval
parent    2e2642e641a941f0a1400c7827fd89aa86fef8f4 (diff)
parent    ba5b6b9ec472dc32bdaa8b18c22d30bd6abf7ebc (diff)
Auto merge of #144081 - RalfJung:const-ptr-fragments, r=oli-obk
const-eval: full support for pointer fragments

This fixes https://github.com/rust-lang/const-eval/issues/72 and makes `swap_nonoverlapping` fully work in const-eval, by enhancing per-byte provenance tracking so that each byte also records *which* byte of the original pointer it is. Later, if we see all the same bytes in the exact same order, we can treat them like a whole pointer again, without ever risking a leak of the data bytes (which encode the offset into the allocation). This lifts the limitation that was discussed quite a bit in https://github.com/rust-lang/rust/pull/137280.

For a concrete piece of code that used to fail and now works properly, consider this example, which does a byte-for-byte memcpy in const code without using intrinsics:
```rust
use std::{mem::{self, MaybeUninit}, ptr};

type Byte = MaybeUninit<u8>;

const unsafe fn memcpy(dst: *mut Byte, src: *const Byte, n: usize) {
    let mut i = 0;
    while i < n {
        *dst.add(i) = *src.add(i);
        i += 1;
    }
}

const _MEMCPY: () = unsafe {
    let ptr = &42;
    let mut ptr2 = ptr::null::<i32>();
    // Copy from ptr to ptr2.
    memcpy(&mut ptr2 as *mut _ as *mut _, &ptr as *const _ as *const _, mem::size_of::<&i32>());
    assert!(*ptr2 == 42);
};
```
What makes this code tricky is that pointers are "opaque blobs" in const-eval: we cannot just let people look at the individual bytes, since *we don't know what those bytes look like* -- that depends on the absolute address at which the pointed-to object will eventually be placed. The code above "breaks apart" a pointer into individual bytes and then puts them back together in the same order elsewhere. This PR implements the logic to properly track how those individual bytes relate to the original pointer, and to recognize when they are in the right order again.
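
To make the ordering rule concrete, here is a toy model of it -- my own sketch, not rustc's actual data structures: each byte with provenance also remembers its index within the original pointer, and a run of bytes merges back into a whole pointer only if every index lines up.

```rust
/// Toy stand-in for per-byte provenance: the allocation this byte points
/// into, plus which byte of the original pointer it is.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Fragment {
    alloc_id: u64,
    idx: u8,
}

/// A pointer-sized run of bytes counts as a whole pointer again only if
/// all bytes share one provenance and their fragment indices are exactly
/// 0, 1, 2, ... in order.
fn merge_fragments(bytes: &[Fragment]) -> Option<u64> {
    let first = *bytes.first()?;
    bytes
        .iter()
        .enumerate()
        .all(|(i, f)| f.alloc_id == first.alloc_id && usize::from(f.idx) == i)
        .then_some(first.alloc_id)
}

fn main() {
    let ptr: Vec<Fragment> = (0..8).map(|i| Fragment { alloc_id: 1, idx: i }).collect();
    // A byte-for-byte copy preserves the order, so provenance survives.
    assert_eq!(merge_fragments(&ptr), Some(1));
    // Reordering the bytes turns the value back into "just bytes".
    let mut scrambled = ptr.clone();
    scrambled.swap(0, 1);
    assert_eq!(merge_fragments(&scrambled), None);
}
```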

We still reject constants where the final value contains a pointer that has not been fully put back together: I have no idea how one could construct an LLVM global where one byte is defined as "the 3rd byte of a pointer to that other global over there" -- and even if LLVM supports this somehow, we can leave implementing that to a future PR. It seems unlikely to me that anyone would even want this, but who knows.^^
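
For illustration, a sketch (based on the description above, untested) of a constant that this restriction rejects: evaluation itself succeeds, since the single byte is tracked as a pointer fragment, but the final value then contains only one byte of a pointer, which interning cannot represent.

```rust
use std::mem::MaybeUninit;

const _PARTIAL: MaybeUninit<u8> = unsafe {
    let ptr: &i32 = &42;
    // Keep only the first byte of the pointer's representation.
    // Evaluating this is fine, but interning the final value fails with
    // "encountered partial pointer in final value of constant".
    *(&ptr as *const &i32 as *const MaybeUninit<u8>)
};
```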

This also changes the behavior of Miri: it now tracks the order of bytes with provenance, and only considers a pointer to have valid provenance if all its bytes are in the original order again. This is related to https://github.com/rust-lang/unsafe-code-guidelines/issues/558. It means one cannot implement XOR linked lists with strict provenance any more, which is, however, only of theoretical interest. Practically, I am curious whether anyone will show up with code that Miri now complains about -- that would be interesting data. Cc `@rust-lang/opsem`
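
As a Miri illustration, a minimal sketch of the new rule (assuming that transmuting a pointer to `MaybeUninit` bytes behaves like a byte-for-byte copy): shuffling the bytes of a pointer is fine as long as every byte ends up back in its original position before the value is used as a pointer again.

```rust
use std::mem::{self, MaybeUninit};

fn main() {
    let x = 42u8;
    let ptr: *const u8 = &x;
    // View the pointer as raw bytes; each byte keeps fragment provenance.
    let mut bytes: [MaybeUninit<u8>; mem::size_of::<*const u8>()] =
        unsafe { mem::transmute(ptr) };
    bytes.reverse();
    bytes.reverse(); // net effect: every byte is back in its original slot
    let ptr2: *const u8 = unsafe { mem::transmute(bytes) };
    // All fragments are in order again, so the provenance is valid.
    // Had we left the bytes reversed, Miri would treat `ptr2` as a
    // pointer without valid provenance.
    assert_eq!(unsafe { *ptr2 }, 42);
}
```
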
Diffstat (limited to 'compiler/rustc_const_eval')
-rw-r--r--  compiler/rustc_const_eval/messages.ftl                    | 11
-rw-r--r--  compiler/rustc_const_eval/src/const_eval/eval_queries.rs  |  7
-rw-r--r--  compiler/rustc_const_eval/src/errors.rs                   | 16
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs         | 97
-rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs         | 44
-rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs          |  8
-rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs       |  4
7 files changed, 101 insertions, 86 deletions
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index 2985eafb63a..60518dafbf2 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -57,7 +57,7 @@ const_eval_const_context = {$kind ->
 }
 
 const_eval_const_heap_ptr_in_final = encountered `const_allocate` pointer in final value that was not made global
-    .note = use `const_make_global` to make allocated pointers immutable before returning
+    .note = use `const_make_global` to turn allocated pointers into immutable globals before returning
 
 const_eval_const_make_global_ptr_already_made_global = attempting to call `const_make_global` twice on the same allocation {$alloc}
 
@@ -231,6 +231,9 @@ const_eval_mutable_borrow_escaping =
 
 const_eval_mutable_ptr_in_final = encountered mutable pointer in final value of {const_eval_intern_kind}
 
+const_eval_partial_pointer_in_final = encountered partial pointer in final value of {const_eval_intern_kind}
+    .note = while pointers can be broken apart into individual bytes during const-evaluation, only complete pointers (with all their bytes in the right order) are supported in the final value
+
 const_eval_nested_static_in_thread_local = #[thread_local] does not support implicit nested statics, please create explicit static items and refer to them instead
 
 const_eval_non_const_await =
@@ -299,10 +302,8 @@ const_eval_panic = evaluation panicked: {$msg}
 
 const_eval_panic_non_str = argument to `panic!()` in a const context must have type `&str`
 
-const_eval_partial_pointer_copy =
-    unable to copy parts of a pointer from memory at {$ptr}
-const_eval_partial_pointer_overwrite =
-    unable to overwrite parts of a pointer in memory at {$ptr}
+const_eval_partial_pointer_read =
+    unable to read parts of a pointer from memory at {$ptr}
 const_eval_pointer_arithmetic_overflow =
     overflowing pointer arithmetic: the total offset in bytes does not fit in an `isize`
 
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 5835660e1c3..4da2663319c 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -117,6 +117,13 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
                 ecx.tcx.dcx().emit_err(errors::ConstHeapPtrInFinal { span: ecx.tcx.span }),
             )));
         }
+        Err(InternError::PartialPointer) => {
+            throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
+                ecx.tcx
+                    .dcx()
+                    .emit_err(errors::PartialPtrInFinal { span: ecx.tcx.span, kind: intern_kind }),
+            )));
+        }
     }
 
     interp_ok(R::make_result(ret, ecx))
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index a4148cb145f..2d412ee5ec2 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -52,6 +52,15 @@ pub(crate) struct ConstHeapPtrInFinal {
 }
 
 #[derive(Diagnostic)]
+#[diag(const_eval_partial_pointer_in_final)]
+#[note]
+pub(crate) struct PartialPtrInFinal {
+    #[primary_span]
+    pub span: Span,
+    pub kind: InternKind,
+}
+
+#[derive(Diagnostic)]
 #[diag(const_eval_unstable_in_stable_exposed)]
 pub(crate) struct UnstableInStableExposed {
     pub gate: String,
@@ -836,8 +845,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
             UnsupportedOpInfo::Unsupported(s) => s.clone().into(),
             UnsupportedOpInfo::ExternTypeField => const_eval_extern_type_field,
             UnsupportedOpInfo::UnsizedLocal => const_eval_unsized_local,
-            UnsupportedOpInfo::OverwritePartialPointer(_) => const_eval_partial_pointer_overwrite,
-            UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_copy,
+            UnsupportedOpInfo::ReadPartialPointer(_) => const_eval_partial_pointer_read,
             UnsupportedOpInfo::ReadPointerAsInt(_) => const_eval_read_pointer_as_int,
             UnsupportedOpInfo::ThreadLocalStatic(_) => const_eval_thread_local_static,
             UnsupportedOpInfo::ExternStatic(_) => const_eval_extern_static,
@@ -848,7 +856,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
         use UnsupportedOpInfo::*;
 
         use crate::fluent_generated::*;
-        if let ReadPointerAsInt(_) | OverwritePartialPointer(_) | ReadPartialPointer(_) = self {
+        if let ReadPointerAsInt(_) | ReadPartialPointer(_) = self {
             diag.help(const_eval_ptr_as_bytes_1);
             diag.help(const_eval_ptr_as_bytes_2);
         }
@@ -860,7 +868,7 @@ impl ReportErrorExt for UnsupportedOpInfo {
             | UnsupportedOpInfo::ExternTypeField
             | Unsupported(_)
             | ReadPointerAsInt(_) => {}
-            OverwritePartialPointer(ptr) | ReadPartialPointer(ptr) => {
+            ReadPartialPointer(ptr) => {
                 diag.arg("ptr", ptr);
             }
             ThreadLocalStatic(did) | ExternStatic(did) => rustc_middle::ty::tls::with(|tcx| {
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index bb59b9f5418..5d510a98352 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -19,9 +19,12 @@ use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
 use rustc_hir as hir;
 use rustc_hir::definitions::{DefPathData, DisambiguatorState};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
-use rustc_middle::mir::interpret::{ConstAllocation, CtfeProvenance, InterpResult};
+use rustc_middle::mir::interpret::{
+    AllocBytes, ConstAllocation, CtfeProvenance, InterpResult, Provenance,
+};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::span_bug;
+use rustc_middle::ty::TyCtxt;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_span::def_id::LocalDefId;
 use tracing::{instrument, trace};
@@ -52,39 +55,30 @@ impl HasStaticRootDefId for const_eval::CompileTimeMachine<'_> {
     }
 }
 
-/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
-///
-/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
-/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
-/// already mutable (as a sanity check).
-///
-/// Returns an iterator over all relocations referred to by this allocation.
-fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
-    ecx: &mut InterpCx<'tcx, M>,
-    alloc_id: AllocId,
+fn prepare_alloc<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
+    tcx: TyCtxt<'tcx>,
+    kind: MemoryKind<const_eval::MemoryKind>,
+    alloc: &mut Allocation<Prov, Extra, Bytes>,
     mutability: Mutability,
-    disambiguator: Option<&mut DisambiguatorState>,
-) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, InternError> {
-    trace!("intern_shallow {:?}", alloc_id);
-    // remove allocation
-    // FIXME(#120456) - is `swap_remove` correct?
-    let Some((kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else {
-        return Err(InternError::DanglingPointer);
-    };
-
+) -> Result<(), InternError> {
     match kind {
         MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => {
             if !was_made_global {
                 // Attempting to intern a `const_allocate`d pointer that was not made global via
-                // `const_make_global`. We want to error here, but we have to first put the
-                // allocation back into the `alloc_map` to keep things in a consistent state.
-                ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
+                // `const_make_global`.
+                tcx.dcx().delayed_bug("non-global heap allocation in const value");
                 return Err(InternError::ConstAllocNotGlobal);
             }
         }
         MemoryKind::Stack | MemoryKind::CallerLocation => {}
     }
 
+    if !alloc.provenance_merge_bytes(&tcx) {
+        // Per-byte provenance is not supported by backends, so we cannot accept it here.
+        tcx.dcx().delayed_bug("partial pointer in const value");
+        return Err(InternError::PartialPointer);
+    }
+
     // Set allocation mutability as appropriate. This is used by LLVM to put things into
     // read-only memory, and also by Miri when evaluating other globals that
     // access this one.
@@ -97,6 +91,36 @@ fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
             assert_eq!(alloc.mutability, Mutability::Mut);
         }
     }
+    Ok(())
+}
+
+/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
+///
+/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
+/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
+/// already mutable (as a sanity check).
+///
+/// Returns an iterator over all relocations referred to by this allocation.
+fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
+    ecx: &mut InterpCx<'tcx, M>,
+    alloc_id: AllocId,
+    mutability: Mutability,
+    disambiguator: Option<&mut DisambiguatorState>,
+) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, InternError> {
+    trace!("intern_shallow {:?}", alloc_id);
+    // remove allocation
+    // FIXME(#120456) - is `swap_remove` correct?
+    let Some((kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else {
+        return Err(InternError::DanglingPointer);
+    };
+
+    if let Err(err) = prepare_alloc(*ecx.tcx, kind, &mut alloc, mutability) {
+        // We want to error here, but we have to first put the
+        // allocation back into the `alloc_map` to keep things in a consistent state.
+        ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
+        return Err(err);
+    }
+
     // link the alloc id to the actual allocation
     let alloc = ecx.tcx.mk_const_alloc(alloc);
     if let Some(static_id) = ecx.machine.static_def_id() {
@@ -166,6 +190,7 @@ pub enum InternError {
     BadMutablePointer,
     DanglingPointer,
     ConstAllocNotGlobal,
+    PartialPointer,
 }
 
 /// Intern `ret` and everything it references.
@@ -221,13 +246,11 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
     let mut todo: Vec<_> = if is_static {
         // Do not steal the root allocation, we need it later to create the return value of `eval_static_initializer`.
         // But still change its mutability to match the requested one.
-        let alloc = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
-        alloc.1.mutability = base_mutability;
-        alloc.1.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
+        let (kind, alloc) = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
+        prepare_alloc(*ecx.tcx, *kind, alloc, base_mutability)?;
+        alloc.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
     } else {
-        intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))
-            .unwrap()
-            .collect()
+        intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))?.collect()
     };
     // We need to distinguish "has just been interned" from "was already in `tcx`",
     // so we track this in a separate set.
@@ -235,7 +258,6 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
     // Whether we encountered a bad mutable pointer.
     // We want to first report "dangling" and then "mutable", so we need to delay reporting these
     // errors.
-    let mut result = Ok(());
     let mut found_bad_mutable_ptr = false;
 
     // Keep interning as long as there are things to intern.
@@ -310,20 +332,15 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
         // okay with losing some potential for immutability here. This can anyway only affect
         // `static mut`.
         just_interned.insert(alloc_id);
-        match intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator)) {
-            Ok(nested) => todo.extend(nested),
-            Err(err) => {
-                ecx.tcx.dcx().delayed_bug("error during const interning");
-                result = Err(err);
-            }
-        }
+        let next = intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator))?;
+        todo.extend(next);
     }
-    if found_bad_mutable_ptr && result.is_ok() {
+    if found_bad_mutable_ptr {
         // We found a mutable pointer inside a const where inner allocations should be immutable,
         // and there was no other error. This should usually never happen! However, this can happen
         // in unleash-miri mode, so report it as a normal error then.
         if ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
-            result = Err(InternError::BadMutablePointer);
+            return Err(InternError::BadMutablePointer);
         } else {
             span_bug!(
                 ecx.tcx.span,
@@ -331,7 +348,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
             );
         }
     }
-    result
+    Ok(())
 }
 
 /// Intern `ret`. This function assumes that `ret` references no other allocation.
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 23b40894f16..2c1e5087e1c 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -1314,29 +1314,20 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
     }
 
     /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
-    pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
+    pub fn write_uninit(&mut self, range: AllocRange) {
         let range = self.range.subrange(range);
 
-        self.alloc
-            .write_uninit(&self.tcx, range)
-            .map_err(|e| e.to_interp_error(self.alloc_id))
-            .into()
+        self.alloc.write_uninit(&self.tcx, range);
     }
 
     /// Mark the entire referenced range as uninitialized
-    pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
-        self.alloc
-            .write_uninit(&self.tcx, self.range)
-            .map_err(|e| e.to_interp_error(self.alloc_id))
-            .into()
+    pub fn write_uninit_full(&mut self) {
+        self.alloc.write_uninit(&self.tcx, self.range);
     }
 
     /// Remove all provenance in the reference range.
-    pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
-        self.alloc
-            .clear_provenance(&self.tcx, self.range)
-            .map_err(|e| e.to_interp_error(self.alloc_id))
-            .into()
+    pub fn clear_provenance(&mut self) {
+        self.alloc.clear_provenance(&self.tcx, self.range);
     }
 }
 
@@ -1427,11 +1418,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
         // Side-step AllocRef and directly access the underlying bytes more efficiently.
         // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
-        let alloc_id = alloc_ref.alloc_id;
-        let bytes = alloc_ref
-            .alloc
-            .get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range)
-            .map_err(move |e| e.to_interp_error(alloc_id))?;
+        let bytes =
+            alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -1513,10 +1501,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // `get_bytes_mut` will clear the provenance, which is correct,
         // since we don't want to keep any provenance at the target.
         // This will also error if copying partial provenance is not supported.
-        let provenance = src_alloc
-            .provenance()
-            .prepare_copy(src_range, dest_offset, num_copies, self)
-            .map_err(|e| e.to_interp_error(src_alloc_id))?;
+        let provenance =
+            src_alloc.provenance().prepare_copy(src_range, dest_offset, num_copies, self);
         // Prepare a copy of the initialization mask.
         let init = src_alloc.init_mask().prepare_copy(src_range);
 
@@ -1534,10 +1520,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             dest_range,
         )?;
         // Yes we do overwrite all bytes in `dest_bytes`.
-        let dest_bytes = dest_alloc
-            .get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range)
-            .map_err(|e| e.to_interp_error(dest_alloc_id))?
-            .as_mut_ptr();
+        let dest_bytes =
+            dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();
 
         if init.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
@@ -1546,9 +1530,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             // This also avoids writing to the target bytes so that the backing allocation is never
             // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
             // operating system this can avoid physically allocating the page.
-            dest_alloc
-                .write_uninit(&tcx, dest_range)
-                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+            dest_alloc.write_uninit(&tcx, dest_range);
             // `write_uninit` also resets the provenance, so we are done.
             return interp_ok(());
         }
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 6ff50dc700f..3255ffa54aa 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -705,7 +705,7 @@ where
 
         match value {
             Immediate::Scalar(scalar) => {
-                alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
+                alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)?;
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
@@ -725,10 +725,10 @@ where
                 alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
                 alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
                 // We don't have to reset padding here, `write_immediate` will anyway do a validation run.
-                interp_ok(())
             }
             Immediate::Uninit => alloc.write_uninit_full(),
         }
+        interp_ok(())
     }
 
     pub fn write_uninit(
@@ -748,7 +748,7 @@ where
                     // Zero-sized access
                     return interp_ok(());
                 };
-                alloc.write_uninit_full()?;
+                alloc.write_uninit_full();
             }
         }
         interp_ok(())
@@ -772,7 +772,7 @@ where
                     // Zero-sized access
                     return interp_ok(());
                 };
-                alloc.clear_provenance()?;
+                alloc.clear_provenance();
             }
         }
         interp_ok(())
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index ab0c0665d51..5e8bee65706 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -949,7 +949,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 let padding_size = offset - padding_cleared_until;
                 let range = alloc_range(padding_start, padding_size);
                 trace!("reset_padding on {}: resetting padding range {range:?}", mplace.layout.ty);
-                alloc.write_uninit(range)?;
+                alloc.write_uninit(range);
             }
             padding_cleared_until = offset + size;
         }
@@ -1239,7 +1239,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                 if self.reset_provenance_and_padding {
                     // We can't share this with above as above, we might be looking at read-only memory.
                     let mut alloc = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)?.expect("we already excluded size 0");
-                    alloc.clear_provenance()?;
+                    alloc.clear_provenance();
                     // Also, mark this as containing data, not padding.
                     self.add_data_range(mplace.ptr(), size);
                 }