Diffstat (limited to 'compiler/rustc_const_eval/src')
8 files changed, 85 insertions, 25 deletions
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 8a07f90c951..b3f4fd5e2a1 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -216,7 +216,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
             }
             sym::write_bytes => {
-                self.write_bytes_intrinsic(&args[0], &args[1], &args[2])?;
+                self.write_bytes_intrinsic(&args[0], &args[1], &args[2], "write_bytes")?;
             }
             sym::compare_bytes => {
                 let result = self.compare_bytes_intrinsic(&args[0], &args[1], &args[2])?;
@@ -599,9 +599,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let count = self.read_target_usize(count)?;
         let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap())?;
         let (size, align) = (layout.size, layout.align.abi);
-        // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
-        // but no actual allocation can be big enough for the difference to be noticeable.
-        let size = size.checked_mul(count, self).ok_or_else(|| {
+
+        let size = self.compute_size_in_bytes(size, count).ok_or_else(|| {
             err_ub_custom!(
                 fluent::const_eval_size_overflow,
                 name = if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
@@ -635,11 +634,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         Ok(())
     }

-    pub(crate) fn write_bytes_intrinsic(
+    pub fn write_bytes_intrinsic(
         &mut self,
         dst: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
         byte: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
         count: &OpTy<'tcx, <M as Machine<'tcx>>::Provenance>,
+        name: &'static str,
     ) -> InterpResult<'tcx> {
         let layout = self.layout_of(dst.layout.ty.builtin_deref(true).unwrap())?;
@@ -649,9 +649,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {

         // `checked_mul` enforces a too small bound (the correct one would probably be target_isize_max),
         // but no actual allocation can be big enough for the difference to be noticeable.
-        let len = layout.size.checked_mul(count, self).ok_or_else(|| {
-            err_ub_custom!(fluent::const_eval_size_overflow, name = "write_bytes")
-        })?;
+        let len = self
+            .compute_size_in_bytes(layout.size, count)
+            .ok_or_else(|| err_ub_custom!(fluent::const_eval_size_overflow, name = name))?;

         let bytes = std::iter::repeat(byte).take(len.bytes_usize());
         self.write_bytes_ptr(dst, bytes)
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 8cab3c34eed..67c99934059 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -540,10 +540,29 @@ pub trait Machine<'tcx>: Sized {
         Ok(ReturnAction::Normal)
     }

+    /// Called immediately after an "immediate" local variable is read
+    /// (i.e., this is called for reads that do not end up accessing addressable memory).
+    #[inline(always)]
+    fn after_local_read(_ecx: &InterpCx<'tcx, Self>, _local: mir::Local) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
+    /// Called immediately after an "immediate" local variable is assigned a new value
+    /// (i.e., this is called for writes that do not end up in memory).
+    /// `storage_live` indicates whether this is the initial write upon `StorageLive`.
+    #[inline(always)]
+    fn after_local_write(
+        _ecx: &mut InterpCx<'tcx, Self>,
+        _local: mir::Local,
+        _storage_live: bool,
+    ) -> InterpResult<'tcx> {
+        Ok(())
+    }
+
     /// Called immediately after actual memory was allocated for a local
     /// but before the local's stack frame is updated to point to that memory.
     #[inline(always)]
-    fn after_local_allocated(
+    fn after_local_moved_to_memory(
         _ecx: &mut InterpCx<'tcx, Self>,
         _local: mir::Local,
         _mplace: &MPlaceTy<'tcx, Self::Provenance>,
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index bd46541dc50..b1d1dab7c16 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -222,7 +222,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         } else {
             Allocation::try_uninit(size, align)?
         };
-        self.allocate_raw_ptr(alloc, kind)
+        self.insert_allocation(alloc, kind)
     }

     pub fn allocate_bytes_ptr(
         &mut self,
@@ -233,14 +233,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         mutability: Mutability,
     ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
         let alloc = Allocation::from_bytes(bytes, align, mutability);
-        self.allocate_raw_ptr(alloc, kind)
+        self.insert_allocation(alloc, kind)
     }

-    pub fn allocate_raw_ptr(
+    pub fn insert_allocation(
         &mut self,
         alloc: Allocation<M::Provenance, (), M::Bytes>,
         kind: MemoryKind<M::MemoryKind>,
     ) -> InterpResult<'tcx, Pointer<M::Provenance>> {
+        assert!(alloc.size() <= self.max_size_of_val());
         let id = self.tcx.reserve_alloc_id();
         debug_assert_ne!(
             Some(kind),
@@ -1046,6 +1047,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         );
         res
     }
+
+    pub(super) fn validation_in_progress(&self) -> bool {
+        self.memory.validation_in_progress
+    }
 }

 #[doc(hidden)]
diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs
index 2e02d1001c8..ead3ef6861a 100644
--- a/compiler/rustc_const_eval/src/interpret/operand.rs
+++ b/compiler/rustc_const_eval/src/interpret/operand.rs
@@ -697,6 +697,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         if matches!(op, Operand::Immediate(_)) {
             assert!(!layout.is_unsized());
         }
+        M::after_local_read(self, local)?;
         Ok(OpTy { op, layout })
     }
diff --git a/compiler/rustc_const_eval/src/interpret/operator.rs b/compiler/rustc_const_eval/src/interpret/operator.rs
index b390bb87789..7d32fcbb664 100644
--- a/compiler/rustc_const_eval/src/interpret/operator.rs
+++ b/compiler/rustc_const_eval/src/interpret/operator.rs
@@ -1,11 +1,12 @@
 use either::Either;
 use rustc_apfloat::{Float, FloatConvert};
-use rustc_middle::mir::interpret::{InterpResult, Scalar};
+use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
 use rustc_middle::mir::NullOp;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, FloatTy, ScalarInt, Ty};
 use rustc_middle::{bug, mir, span_bug};
 use rustc_span::symbol::sym;
+use rustc_target::abi::Size;
 use tracing::trace;

 use super::{throw_ub, ImmTy, InterpCx, Machine, MemPlaceMeta};
@@ -287,6 +288,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         })
     }

+    /// Computes the total size of this access, `count * elem_size`,
+    /// checking for overflow beyond isize::MAX.
+    pub fn compute_size_in_bytes(&self, elem_size: Size, count: u64) -> Option<Size> {
+        // `checked_mul` applies `u64` limits independent of the target pointer size... but the
+        // subsequent check for `max_size_of_val` means we also handle 32bit targets correctly.
+        // (We cannot use `Size::checked_mul` as that enforces `obj_size_bound` as the limit, which
+        // would be wrong here.)
+        elem_size
+            .bytes()
+            .checked_mul(count)
+            .map(Size::from_bytes)
+            .filter(|&total| total <= self.max_size_of_val())
+    }
+
     fn binary_ptr_op(
         &self,
         bin_op: mir::BinOp,
diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs
index 9dd9ca80385..e398c4c0742 100644
--- a/compiler/rustc_const_eval/src/interpret/place.rs
+++ b/compiler/rustc_const_eval/src/interpret/place.rs
@@ -500,15 +500,13 @@ where
         &self,
         local: mir::Local,
     ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
-        // Other parts of the system rely on `Place::Local` never being unsized.
-        // So we eagerly check here if this local has an MPlace, and if yes we use it.
         let frame = self.frame();
         let layout = self.layout_of_local(frame, local, None)?;
         let place = if layout.is_sized() {
             // We can just always use the `Local` for sized values.
             Place::Local { local, offset: None, locals_addr: frame.locals_addr() }
         } else {
-            // Unsized `Local` isn't okay (we cannot store the metadata).
+            // Other parts of the system rely on `Place::Local` never being unsized.
             match frame.locals[local].access()? {
                 Operand::Immediate(_) => bug!(),
                 Operand::Indirect(mplace) => Place::Ptr(*mplace),
@@ -562,7 +560,10 @@ where
         place: &PlaceTy<'tcx, M::Provenance>,
     ) -> InterpResult<
         'tcx,
-        Either<MPlaceTy<'tcx, M::Provenance>, (&mut Immediate<M::Provenance>, TyAndLayout<'tcx>)>,
+        Either<
+            MPlaceTy<'tcx, M::Provenance>,
+            (&mut Immediate<M::Provenance>, TyAndLayout<'tcx>, mir::Local),
+        >,
     > {
         Ok(match place.to_place().as_mplace_or_local() {
             Left(mplace) => Left(mplace),
@@ -581,7 +582,7 @@ where
                     }
                     Operand::Immediate(local_val) => {
                         // The local still has the optimized representation.
-                        Right((local_val, layout))
+                        Right((local_val, layout, local))
                     }
                 }
             }
@@ -643,9 +644,13 @@ where
         assert!(dest.layout().is_sized(), "Cannot write unsized immediate data");

         match self.as_mplace_or_mutable_local(&dest.to_place())? {
-            Right((local_val, local_layout)) => {
+            Right((local_val, local_layout, local)) => {
                 // Local can be updated in-place.
                 *local_val = src;
+                // Call the machine hook (the data race detector needs to know about this write).
+                if !self.validation_in_progress() {
+                    M::after_local_write(self, local, /*storage_live*/ false)?;
+                }
                 // Double-check that the value we are storing and the local fit to each other.
                 if cfg!(debug_assertions) {
                     src.assert_matches_abi(local_layout.abi, self);
@@ -714,8 +719,12 @@ where
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         match self.as_mplace_or_mutable_local(&dest.to_place())? {
-            Right((local_val, _local_layout)) => {
+            Right((local_val, _local_layout, local)) => {
                 *local_val = Immediate::Uninit;
+                // Call the machine hook (the data race detector needs to know about this write).
+                if !self.validation_in_progress() {
+                    M::after_local_write(self, local, /*storage_live*/ false)?;
+                }
             }
             Left(mplace) => {
                 let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
@@ -734,8 +743,12 @@ where
         dest: &impl Writeable<'tcx, M::Provenance>,
     ) -> InterpResult<'tcx> {
         match self.as_mplace_or_mutable_local(&dest.to_place())? {
-            Right((local_val, _local_layout)) => {
+            Right((local_val, _local_layout, local)) => {
                 local_val.clear_provenance()?;
+                // Call the machine hook (the data race detector needs to know about this write).
+                if !self.validation_in_progress() {
+                    M::after_local_write(self, local, /*storage_live*/ false)?;
+                }
             }
             Left(mplace) => {
                 let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
@@ -941,7 +954,7 @@ where
                     mplace.mplace,
                 )?;
             }
-            M::after_local_allocated(self, local, &mplace)?;
+            M::after_local_moved_to_memory(self, local, &mplace)?;
             // Now we can call `access_mut` again, asserting it goes well, and actually
             // overwrite things. This points to the entire allocation, not just the part
             // the place refers to, i.e. we do this before we apply `offset`.
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 395e78e623b..58ce44e4014 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -17,7 +17,7 @@ use rustc_target::abi::{self, Size, VariantIdx};
 use tracing::{debug, instrument};

 use super::{
-    throw_ub, throw_unsup, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
+    err_ub, throw_ub, throw_unsup, InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy,
     Provenance, Scalar,
 };
@@ -229,7 +229,11 @@ where
                 // This can only be reached in ConstProp and non-rustc-MIR.
                 throw_ub!(BoundsCheckFailed { len, index });
             }
-            let offset = stride * index; // `Size` multiplication
+            // With raw slices, `len` can be so big that this *can* overflow.
+            let offset = self
+                .compute_size_in_bytes(stride, index)
+                .ok_or_else(|| err_ub!(PointerArithOverflow))?;
+
             // All fields have the same layout.
             let field_layout = base.layout().field(self, 0);
             (offset, field_layout)
diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs
index b6e83715e39..db418c82f66 100644
--- a/compiler/rustc_const_eval/src/interpret/stack.rs
+++ b/compiler/rustc_const_eval/src/interpret/stack.rs
@@ -534,8 +534,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
             Operand::Indirect(*dest_place.mplace())
         } else {
-            assert!(!meta.has_meta()); // we're dropping the metadata
             // Just make this an efficient immediate.
+            assert!(!meta.has_meta()); // we're dropping the metadata
+            // Make sure the machine knows this "write" is happening. (This is important so that
+            // races involving local variable allocation can be detected by Miri.)
+            M::after_local_write(self, local, /*storage_live*/ true)?;
             // Note that not calling `layout_of` here does have one real consequence:
             // if the type is too big, we'll only notice this when the local is actually initialized,
             // which is a bit too late -- we should ideally notice this already here, when the memory
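Note: the sketch below is not part of the patch. It is a simplified, self-contained model of the hook pattern this change adds to the `Machine` trait: no-op default methods (`after_local_read`, `after_local_write`) that a machine such as Miri can override to observe accesses to "immediate" locals. The `Local`, `Machine`, and `TracingMachine` names here are illustrative stand-ins; the real hooks are associated functions taking the whole `InterpCx` and a `mir::Local`, not `&self` and a plain index.

// Simplified model of the "machine hook with no-op default" pattern from machine.rs above.
// All names here are stand-ins, not the actual rustc_const_eval types.
type InterpResult = Result<(), String>;

#[derive(Clone, Copy, Debug)]
struct Local(u32); // stand-in for mir::Local

trait Machine {
    // Called after an "immediate" local is read (no addressable memory was touched).
    #[inline(always)]
    fn after_local_read(&self, _local: Local) -> InterpResult {
        Ok(())
    }

    // Called after an "immediate" local is written; `storage_live` marks the initial
    // write performed on StorageLive.
    #[inline(always)]
    fn after_local_write(&mut self, _local: Local, _storage_live: bool) -> InterpResult {
        Ok(())
    }
}

// A machine that observes local reads/writes, e.g. to feed a data race detector.
struct TracingMachine;

impl Machine for TracingMachine {
    fn after_local_read(&self, local: Local) -> InterpResult {
        println!("read of {:?}", local);
        Ok(())
    }

    fn after_local_write(&mut self, local: Local, storage_live: bool) -> InterpResult {
        println!("write to {:?} (storage_live = {})", local, storage_live);
        Ok(())
    }
}

fn main() -> InterpResult {
    let mut machine = TracingMachine;
    machine.after_local_write(Local(0), /*storage_live*/ true)?;
    machine.after_local_read(Local(0))?;
    Ok(())
}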

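Similarly, here is a minimal stand-alone illustration of the overflow check performed by the new `compute_size_in_bytes` helper: a checked `u64` multiplication followed by a cap at the largest possible object size. The `MAX_SIZE_OF_VAL` constant stands in for `self.max_size_of_val()`; the interpreter uses the target's isize::MAX, while this sketch uses the host's.

// Stand-alone model of `count * elem_size` with overflow checked against the largest
// possible object size, mirroring compute_size_in_bytes in operator.rs above.
const MAX_SIZE_OF_VAL: u64 = isize::MAX as u64; // stand-in for self.max_size_of_val()

fn compute_size_in_bytes(elem_size: u64, count: u64) -> Option<u64> {
    elem_size
        .checked_mul(count) // reject u64 overflow
        .filter(|&total| total <= MAX_SIZE_OF_VAL) // reject sizes no allocation can have
}

fn main() {
    // An ordinary access: 8-byte elements, 1000 of them.
    assert_eq!(compute_size_in_bytes(8, 1000), Some(8000));
    // Fits in u64 but exceeds the object-size limit, so it is rejected.
    assert_eq!(compute_size_in_bytes(2, isize::MAX as u64), None);
    // Overflows u64 entirely; callers turn this None into a size-overflow or
    // PointerArithOverflow error.
    assert_eq!(compute_size_in_bytes(8, u64::MAX / 2), None);
}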