diff options
| author | Ralf Jung <post@ralfj.de> | 2022-04-17 19:27:41 -0400 |
|---|---|---|
| committer | Ralf Jung <post@ralfj.de> | 2022-04-17 19:32:13 -0400 |
| commit | 3ec1febbf570b555c35db8228b27c4e300d11e0b (patch) | |
| tree | 14bb5c6a0d76cc76101818461662a47f7c7ad72d | |
| parent | 85ee04c44a1d4ffc2f2e3bb0c217908ce4062a18 (diff) | |
| download | rust-3ec1febbf570b555c35db8228b27c4e300d11e0b.tar.gz rust-3ec1febbf570b555c35db8228b27c4e300d11e0b.zip | |
add caution to some comments
| -rw-r--r-- | compiler/rustc_const_eval/src/interpret/memory.rs | 2 | ||||
| -rw-r--r-- | compiler/rustc_middle/src/mir/interpret/allocation.rs | 8 |
2 files changed, 8 insertions, 2 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 32b739f9335..9ae50d0df80 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -1058,7 +1058,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { // operating system this can avoid physically allocating the page. dest_alloc .write_uninit(&tcx, dest_range) - .map_err(|e| e.to_interp_error(dest_alloc_id))?; // `Size` multiplication + .map_err(|e| e.to_interp_error(dest_alloc_id))?; // We can forget about the relocations, this is all not initialized anyway. return Ok(()); } diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index e106f9efc18..ad1ea1a6d39 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -269,7 +269,7 @@ impl<Tag: Provenance, Extra> Allocation<Tag, Extra> { /// `get_bytes_with_uninit_and_ptr` instead, /// /// This function also guarantees that the resulting pointer will remain stable - /// even when new allocations are pushed to the `HashMap`. `copy_repeatedly` relies + /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies /// on that. /// /// It is the caller's responsibility to check bounds and alignment beforehand. @@ -605,6 +605,9 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> { /// Applies a relocation copy. /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected /// to be clear of relocations. + /// + /// This is dangerous to use as it can violate internal `Allocation` invariants! + /// It only exists to support an efficient implementation of `mem_copy_repeatedly`. 
pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Tag>) { self.relocations.0.insert_presorted(relocations.relative_relocations); } @@ -1124,6 +1127,9 @@ impl<Tag, Extra> Allocation<Tag, Extra> { } /// Applies multiple instances of the run-length encoding to the initialization mask. + /// + /// This is dangerous to use as it can violate internal `Allocation` invariants! + /// It only exists to support an efficient implementation of `mem_copy_repeatedly`. pub fn mark_compressed_init_range( &mut self, defined: &InitMaskCompressed,
