Diffstat (limited to 'compiler/rustc_const_eval/src/interpret')
 -rw-r--r--  compiler/rustc_const_eval/src/interpret/intern.rs    | 97
 -rw-r--r--  compiler/rustc_const_eval/src/interpret/memory.rs    | 44
 -rw-r--r--  compiler/rustc_const_eval/src/interpret/place.rs     |  8
 -rw-r--r--  compiler/rustc_const_eval/src/interpret/validity.rs  |  4
 4 files changed, 76 insertions(+), 77 deletions(-)
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index bb59b9f5418..5d510a98352 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -19,9 +19,12 @@ use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
 use rustc_hir as hir;
 use rustc_hir::definitions::{DefPathData, DisambiguatorState};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
-use rustc_middle::mir::interpret::{ConstAllocation, CtfeProvenance, InterpResult};
+use rustc_middle::mir::interpret::{
+    AllocBytes, ConstAllocation, CtfeProvenance, InterpResult, Provenance,
+};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::span_bug;
+use rustc_middle::ty::TyCtxt;
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_span::def_id::LocalDefId;
 use tracing::{instrument, trace};
@@ -52,39 +55,30 @@ impl HasStaticRootDefId for const_eval::CompileTimeMachine<'_> {
     }
 }
 
-/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
-///
-/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
-/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
-/// already mutable (as a sanity check).
-///
-/// Returns an iterator over all relocations referred to by this allocation.
-fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
-    ecx: &mut InterpCx<'tcx, M>,
-    alloc_id: AllocId,
+fn prepare_alloc<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>(
+    tcx: TyCtxt<'tcx>,
+    kind: MemoryKind<const_eval::MemoryKind>,
+    alloc: &mut Allocation<Prov, Extra, Bytes>,
     mutability: Mutability,
-    disambiguator: Option<&mut DisambiguatorState>,
-) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, InternError> {
-    trace!("intern_shallow {:?}", alloc_id);
-    // remove allocation
-    // FIXME(#120456) - is `swap_remove` correct?
-    let Some((kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else {
-        return Err(InternError::DanglingPointer);
-    };
-
+) -> Result<(), InternError> {
     match kind {
         MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => {
             if !was_made_global {
                 // Attempting to intern a `const_allocate`d pointer that was not made global via
-                // `const_make_global`. We want to error here, but we have to first put the
-                // allocation back into the `alloc_map` to keep things in a consistent state.
-                ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
+                // `const_make_global`.
+                tcx.dcx().delayed_bug("non-global heap allocation in const value");
                 return Err(InternError::ConstAllocNotGlobal);
             }
         }
         MemoryKind::Stack | MemoryKind::CallerLocation => {}
     }
 
+    if !alloc.provenance_merge_bytes(&tcx) {
+        // Per-byte provenance is not supported by backends, so we cannot accept it here.
+        tcx.dcx().delayed_bug("partial pointer in const value");
+        return Err(InternError::PartialPointer);
+    }
+
     // Set allocation mutability as appropriate. This is used by LLVM to put things into
     // read-only memory, and also by Miri when evaluating other globals that
     // access this one.
@@ -97,6 +91,36 @@ fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
             assert_eq!(alloc.mutability, Mutability::Mut);
         }
     }
+    Ok(())
+}
+
+/// Intern an allocation. Returns `Err` if the allocation does not exist in the local memory.
+///
+/// `mutability` can be used to force immutable interning: if it is `Mutability::Not`, the
+/// allocation is interned immutably; if it is `Mutability::Mut`, then the allocation *must be*
+/// already mutable (as a sanity check).
+///
+/// Returns an iterator over all relocations referred to by this allocation.
+fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
+    ecx: &mut InterpCx<'tcx, M>,
+    alloc_id: AllocId,
+    mutability: Mutability,
+    disambiguator: Option<&mut DisambiguatorState>,
+) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, InternError> {
+    trace!("intern_shallow {:?}", alloc_id);
+    // remove allocation
+    // FIXME(#120456) - is `swap_remove` correct?
+    let Some((kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else {
+        return Err(InternError::DanglingPointer);
+    };
+
+    if let Err(err) = prepare_alloc(*ecx.tcx, kind, &mut alloc, mutability) {
+        // We want to error here, but we have to first put the
+        // allocation back into the `alloc_map` to keep things in a consistent state.
+        ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
+        return Err(err);
+    }
+
     // link the alloc id to the actual allocation
     let alloc = ecx.tcx.mk_const_alloc(alloc);
     if let Some(static_id) = ecx.machine.static_def_id() {
@@ -166,6 +190,7 @@ pub enum InternError {
     BadMutablePointer,
     DanglingPointer,
     ConstAllocNotGlobal,
+    PartialPointer,
 }
 
 /// Intern `ret` and everything it references.
@@ -221,13 +246,11 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
     let mut todo: Vec<_> = if is_static {
         // Do not steal the root allocation, we need it later to create the return value of `eval_static_initializer`.
         // But still change its mutability to match the requested one.
-        let alloc = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
-        alloc.1.mutability = base_mutability;
-        alloc.1.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
+        let (kind, alloc) = ecx.memory.alloc_map.get_mut(&base_alloc_id).unwrap();
+        prepare_alloc(*ecx.tcx, *kind, alloc, base_mutability)?;
+        alloc.provenance().ptrs().iter().map(|&(_, prov)| prov).collect()
     } else {
-        intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))
-            .unwrap()
-            .collect()
+        intern_shallow(ecx, base_alloc_id, base_mutability, Some(&mut disambiguator))?.collect()
     };
     // We need to distinguish "has just been interned" from "was already in `tcx`",
     // so we track this in a separate set.
@@ -235,7 +258,6 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
     // Whether we encountered a bad mutable pointer.
     // We want to first report "dangling" and then "mutable", so we need to delay reporting these
     // errors.
-    let mut result = Ok(());
     let mut found_bad_mutable_ptr = false;
 
     // Keep interning as long as there are things to intern.
@@ -310,20 +332,15 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
         // okay with losing some potential for immutability here. This can anyway only affect
         // `static mut`.
         just_interned.insert(alloc_id);
-        match intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator)) {
-            Ok(nested) => todo.extend(nested),
-            Err(err) => {
-                ecx.tcx.dcx().delayed_bug("error during const interning");
-                result = Err(err);
-            }
-        }
+        let next = intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator))?;
+        todo.extend(next);
     }
-    if found_bad_mutable_ptr && result.is_ok() {
+    if found_bad_mutable_ptr {
         // We found a mutable pointer inside a const where inner allocations should be immutable,
         // and there was no other error. This should usually never happen! However, this can happen
         // in unleash-miri mode, so report it as a normal error then.
         if ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
-            result = Err(InternError::BadMutablePointer);
+            return Err(InternError::BadMutablePointer);
         } else {
             span_bug!(
                 ecx.tcx.span,
@@ -331,7 +348,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
             );
         }
     }
-    result
+    Ok(())
 }
 
 /// Intern `ret`. This function assumes that `ret` references no other allocation.
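Note on the intern.rs changes above: the checks that used to live inline in `intern_shallow` (heap allocations must have been made global, per-byte provenance must merge back into whole pointers, mutability is forced or sanity-checked) are now shared via `prepare_alloc`, and `intern_shallow` still restores the allocation into `alloc_map` when those checks fail so the interpreter state stays consistent. Below is a rough, runnable model of the new partial-pointer rejection, written only from the comments in this diff; the type names, the `index` field, and the 8-byte pointer size are illustrative assumptions, not the compiler's actual representation.

// A rough model (not compiler code) of what the `provenance_merge_bytes` call and
// the new `InternError::PartialPointer` variant guard against: every byte of an
// allocation may carry its own provenance fragment, and interning can only accept
// the allocation if those fragments line up into whole pointers, because codegen
// backends cannot encode per-byte provenance.
type AllocId = u64;

/// Per-byte provenance: which allocation this byte points into, and which byte of
/// the original pointer it was (0..POINTER_SIZE).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct ByteProvenance {
    alloc: AllocId,
    index: u8,
}

const POINTER_SIZE: usize = 8; // illustrative assumption: 64-bit target

/// Returns `false` if some byte carries a fragment that does not form part of a
/// complete, in-order pointer (a "partial pointer").
fn provenance_merge_bytes(bytes: &[Option<ByteProvenance>]) -> bool {
    let mut i = 0;
    while i < bytes.len() {
        match bytes[i] {
            None => i += 1, // plain data byte, nothing to merge
            Some(first) => {
                // A pointer must start with fragment 0 and continue, in order,
                // with fragments of the same pointer for POINTER_SIZE bytes.
                if first.index != 0 || i + POINTER_SIZE > bytes.len() {
                    return false;
                }
                for k in 0..POINTER_SIZE {
                    if bytes[i + k] != Some(ByteProvenance { alloc: first.alloc, index: k as u8 }) {
                        return false;
                    }
                }
                i += POINTER_SIZE;
            }
        }
    }
    true
}

fn main() {
    // A complete pointer: all 8 fragments of allocation 1, in order.
    let whole: Vec<Option<ByteProvenance>> =
        (0u8..8).map(|k| Some(ByteProvenance { alloc: 1, index: k })).collect();
    assert!(provenance_merge_bytes(&whole));

    // Only the first 4 bytes of a pointer are present: a "partial pointer", which
    // interning would now reject instead of letting it reach a backend.
    let partial: Vec<Option<ByteProvenance>> =
        (0u8..4).map(|k| Some(ByteProvenance { alloc: 1, index: k })).chain([None; 4]).collect();
    assert!(!provenance_merge_bytes(&partial));
}
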
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index 47bebf5371a..b5703e789ff 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -1310,29 +1310,20 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
     }
 
     /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
-    pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
+    pub fn write_uninit(&mut self, range: AllocRange) {
         let range = self.range.subrange(range);
 
-        self.alloc
-            .write_uninit(&self.tcx, range)
-            .map_err(|e| e.to_interp_error(self.alloc_id))
-            .into()
+        self.alloc.write_uninit(&self.tcx, range);
     }
 
     /// Mark the entire referenced range as uninitialized
-    pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
-        self.alloc
-            .write_uninit(&self.tcx, self.range)
-            .map_err(|e| e.to_interp_error(self.alloc_id))
-            .into()
+    pub fn write_uninit_full(&mut self) {
+        self.alloc.write_uninit(&self.tcx, self.range);
     }
 
     /// Remove all provenance in the reference range.
-    pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
-        self.alloc
-            .clear_provenance(&self.tcx, self.range)
-            .map_err(|e| e.to_interp_error(self.alloc_id))
-            .into()
+    pub fn clear_provenance(&mut self) {
+        self.alloc.clear_provenance(&self.tcx, self.range);
     }
 }
 
@@ -1423,11 +1414,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
         // Side-step AllocRef and directly access the underlying bytes more efficiently.
         // (We are staying inside the bounds here and all bytes do get overwritten so all is good.)
-        let alloc_id = alloc_ref.alloc_id;
-        let bytes = alloc_ref
-            .alloc
-            .get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range)
-            .map_err(move |e| e.to_interp_error(alloc_id))?;
+        let bytes =
+            alloc_ref.alloc.get_bytes_unchecked_for_overwrite(&alloc_ref.tcx, alloc_ref.range);
         // `zip` would stop when the first iterator ends; we want to definitely
         // cover all of `bytes`.
         for dest in bytes {
@@ -1509,10 +1497,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // `get_bytes_mut` will clear the provenance, which is correct,
         // since we don't want to keep any provenance at the target.
         // This will also error if copying partial provenance is not supported.
-        let provenance = src_alloc
-            .provenance()
-            .prepare_copy(src_range, dest_offset, num_copies, self)
-            .map_err(|e| e.to_interp_error(src_alloc_id))?;
+        let provenance =
+            src_alloc.provenance().prepare_copy(src_range, dest_offset, num_copies, self);
         // Prepare a copy of the initialization mask.
         let init = src_alloc.init_mask().prepare_copy(src_range);
 
@@ -1530,10 +1516,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             dest_range,
         )?;
         // Yes we do overwrite all bytes in `dest_bytes`.
-        let dest_bytes = dest_alloc
-            .get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range)
-            .map_err(|e| e.to_interp_error(dest_alloc_id))?
-            .as_mut_ptr();
+        let dest_bytes =
+            dest_alloc.get_bytes_unchecked_for_overwrite_ptr(&tcx, dest_range).as_mut_ptr();
 
         if init.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
@@ -1542,9 +1526,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             // This also avoids writing to the target bytes so that the backing allocation is never
             // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
             // operating system this can avoid physically allocating the page.
-            dest_alloc
-                .write_uninit(&tcx, dest_range)
-                .map_err(|e| e.to_interp_error(dest_alloc_id))?;
+            dest_alloc.write_uninit(&tcx, dest_range);
             // `write_uninit` also resets the provenance, so we are done.
             return interp_ok(());
         }
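Note on the memory.rs changes above: `write_uninit`, `write_uninit_full`, `clear_provenance`, the `get_bytes_unchecked_for_overwrite*` accessors, and `prepare_copy` no longer map an allocation error through `to_interp_error`, since the underlying operations are treated as infallible here. A minimal sketch of that kind of signature change follows, with stand-in types rather than the real `Allocation`:

// Sketch, not compiler code: an operation that can no longer fail changes from
// `Result<(), Error>` to a plain `()` return, so wrappers stop mapping errors.
#[derive(Debug)]
struct Error;

struct Allocation {
    init: Vec<bool>, // one "initialized" flag per byte, standing in for the init mask
}

impl Allocation {
    // Old shape: reports an error it can in fact never hit anymore.
    fn write_uninit_old(&mut self, range: std::ops::Range<usize>) -> Result<(), Error> {
        for b in &mut self.init[range] {
            *b = false;
        }
        Ok(())
    }

    // New shape: infallible, so callers need no `?` and no `map_err`.
    fn write_uninit(&mut self, range: std::ops::Range<usize>) {
        for b in &mut self.init[range] {
            *b = false;
        }
    }
}

fn main() {
    let mut alloc = Allocation { init: vec![true; 8] };
    alloc.write_uninit_old(0..2).unwrap(); // old: caller has to handle a Result
    alloc.write_uninit(4..8); // new: nothing to unwrap or map
    assert_eq!(alloc.init, [false, false, true, true, false, false, false, false]);
}
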
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index e2284729efd..afd96a7c0c9 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -700,7 +700,7 @@ where
 
         match value {
             Immediate::Scalar(scalar) => {
-                alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
+                alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)?;
             }
             Immediate::ScalarPair(a_val, b_val) => {
                 let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
@@ -720,10 +720,10 @@ where
                 alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
                 alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
                 // We don't have to reset padding here, `write_immediate` will anyway do a validation run.
-                interp_ok(())
             }
             Immediate::Uninit => alloc.write_uninit_full(),
         }
+        interp_ok(())
     }
 
     pub fn write_uninit(
@@ -743,7 +743,7 @@ where
                     // Zero-sized access
                     return interp_ok(());
                 };
-                alloc.write_uninit_full()?;
+                alloc.write_uninit_full();
             }
         }
         interp_ok(())
@@ -767,7 +767,7 @@ where
                     // Zero-sized access
                     return interp_ok(());
                 };
-                alloc.clear_provenance()?;
+                alloc.clear_provenance();
             }
         }
         interp_ok(())
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index 693b3782960..f8d9ebcafa7 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -949,7 +949,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 let padding_size = offset - padding_cleared_until;
                 let range = alloc_range(padding_start, padding_size);
                 trace!("reset_padding on {}: resetting padding range {range:?}", mplace.layout.ty);
-                alloc.write_uninit(range)?;
+                alloc.write_uninit(range);
             }
             padding_cleared_until = offset + size;
         }
@@ -1239,7 +1239,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                 if self.reset_provenance_and_padding {
                     // We can't share this with above as above, we might be looking at read-only memory.
                     let mut alloc = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)?.expect("we already excluded size 0");
-                    alloc.clear_provenance()?;
+                    alloc.clear_provenance();
                     // Also, mark this as containing data, not padding.
                     self.add_data_range(mplace.ptr(), size);
                 }
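
The place.rs and validity.rs hunks are the caller-side fallout of the same change: the `?` after the now-infallible helpers disappears, and in the first place.rs hunk the success value moves out of the match so every arm falls through to a single `interp_ok(())`. A simplified sketch of that restructuring (stand-in types, not the interpreter's own):

// Sketch, not compiler code: when one match arm calls an infallible helper, the
// function-level success value moves after the match instead of being returned
// from inside each arm.
#[derive(Debug)]
struct Error;

enum Immediate {
    Scalar(u64),
    Uninit,
}

fn write_scalar(_v: u64) -> Result<(), Error> {
    Ok(()) // still fallible in general
}

fn write_uninit_full() {
    // infallible after the change: nothing to propagate
}

fn write_immediate(value: Immediate) -> Result<(), Error> {
    match value {
        Immediate::Scalar(v) => {
            write_scalar(v)?; // fallible arm keeps its `?`
        }
        Immediate::Uninit => write_uninit_full(), // no `?` anymore
    }
    Ok(()) // single success exit after the match
}

fn main() {
    write_immediate(Immediate::Scalar(7)).unwrap();
    write_immediate(Immediate::Uninit).unwrap();
}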