Diffstat (limited to 'compiler/rustc_const_eval')
20 files changed, 492 insertions, 208 deletions
diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl
index 22a1894ee72..aa0bc42d448 100644
--- a/compiler/rustc_const_eval/messages.ftl
+++ b/compiler/rustc_const_eval/messages.ftl
@@ -56,6 +56,17 @@ const_eval_const_context = {$kind ->
     *[other] {""}
 }
 
+const_eval_const_heap_ptr_in_final = encountered `const_allocate` pointer in final value that was not made global
+    .note = use `const_make_global` to make allocated pointers immutable before returning
+
+const_eval_const_make_global_ptr_already_made_global = attempting to call `const_make_global` twice on the same allocation {$alloc}
+
+const_eval_const_make_global_ptr_is_non_heap = pointer passed to `const_make_global` does not point to a heap allocation: {$ptr}
+
+const_eval_const_make_global_with_dangling_ptr = pointer passed to `const_make_global` is dangling: {$ptr}
+
+const_eval_const_make_global_with_offset = making {$ptr} global which does not point to the beginning of an object
+
 const_eval_copy_nonoverlapping_overlapping =
     `copy_nonoverlapping` called on overlapping ranges
 
@@ -78,6 +89,8 @@ const_eval_dealloc_kind_mismatch =
 
 const_eval_deref_function_pointer =
     accessing {$allocation} which contains a function
+const_eval_deref_typeid_pointer =
+    accessing {$allocation} which contains a `TypeId`
 const_eval_deref_vtable_pointer =
     accessing {$allocation} which contains a vtable
 const_eval_division_by_zero =
@@ -294,19 +307,22 @@ const_eval_pointer_arithmetic_overflow =
     overflowing pointer arithmetic: the total offset in bytes does not fit in an `isize`
 
 const_eval_pointer_out_of_bounds =
-    {const_eval_bad_pointer_op_attempting}, but got {$pointer} which {$inbounds_size_is_neg ->
-    [false] {$alloc_size_minus_ptr_offset ->
-        [0] is at or beyond the end of the allocation of size {$alloc_size ->
-            [1] 1 byte
-            *[x] {$alloc_size} bytes
+    {const_eval_bad_pointer_op_attempting}, but got {$pointer} which {$ptr_offset_is_neg ->
+    [true] points to before the beginning of the allocation
+    *[false] {$inbounds_size_is_neg ->
+        [false] {$alloc_size_minus_ptr_offset ->
+            [0] is at or beyond the end of the allocation of size {$alloc_size ->
+                [1] 1 byte
+                *[x] {$alloc_size} bytes
+            }
+            [1] is only 1 byte from the end of the allocation
+            *[x] is only {$alloc_size_minus_ptr_offset} bytes from the end of the allocation
+        }
+        *[true] {$ptr_offset_abs ->
+            [0] is at the beginning of the allocation
+            *[other] is only {$ptr_offset_abs} bytes from the beginning of the allocation
         }
-        [1] is only 1 byte from the end of the allocation
-        *[x] is only {$alloc_size_minus_ptr_offset} bytes from the end of the allocation
-    }
-    *[true] {$ptr_offset_abs ->
-        [0] is at the beginning of the allocation
-        *[other] is only {$ptr_offset_abs} bytes from the beginning of the allocation
-    }
     }
 
 const_eval_pointer_use_after_free =
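The first new message fires when a `const_allocate`d allocation escapes into a constant's final value without being made global. A minimal sketch of code that would hit it — the feature gates shown (`core_intrinsics`, `const_heap`) are the unstable gates this work targets, so treat the exact spelling as an assumption:

```rust
#![feature(core_intrinsics, const_heap)]
use std::intrinsics;

// The heap pointer escapes into the final value without `const_make_global`,
// so const-eval is expected to report:
// "encountered `const_allocate` pointer in final value that was not made global"
const LEAKED: *mut u8 = unsafe { intrinsics::const_allocate(4, 4) };
```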
diff --git a/compiler/rustc_const_eval/src/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs
index 9ab8e0692e1..ebf18c6f2ac 100644
--- a/compiler/rustc_const_eval/src/check_consts/mod.rs
+++ b/compiler/rustc_const_eval/src/check_consts/mod.rs
@@ -93,7 +93,7 @@ pub fn rustc_allow_const_fn_unstable(
 /// world into two functions: those that are safe to expose on stable (and hence may not use
 /// unstable features, not even recursively), and those that are not.
 pub fn is_fn_or_trait_safe_to_expose_on_stable(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
-    // A default body in a `#[const_trait]` is const-stable when the trait is const-stable.
+    // A default body in a `const trait` is const-stable when the trait is const-stable.
     if tcx.is_const_default_method(def_id) {
         return is_fn_or_trait_safe_to_expose_on_stable(tcx, tcx.parent(def_id));
     }
diff --git a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
index b6e2682af36..438aed41b8b 100644
--- a/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/dummy_machine.rs
@@ -49,7 +49,6 @@ impl HasStaticRootDefId for DummyMachine {
 impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
     interpret::compile_time_machine!(<'tcx>);
 
-    type MemoryKind = !;
     const PANIC_ON_ALLOC_FAIL: bool = true;
 
     // We want to just eval random consts in the program, so `eval_mir_const` can fail.
diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs
index 08fc03d9c46..3e880d02001 100644
--- a/compiler/rustc_const_eval/src/const_eval/error.rs
+++ b/compiler/rustc_const_eval/src/const_eval/error.rs
@@ -2,7 +2,7 @@ use std::mem;
 
 use rustc_errors::{Diag, DiagArgName, DiagArgValue, DiagMessage, IntoDiagArg};
 use rustc_middle::mir::AssertKind;
-use rustc_middle::mir::interpret::{Provenance, ReportedErrorInfo};
+use rustc_middle::mir::interpret::{AllocId, Provenance, ReportedErrorInfo};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::LayoutError;
 use rustc_middle::ty::{ConstInt, TyCtxt};
@@ -11,8 +11,8 @@ use rustc_span::{Span, Symbol};
 use super::CompileTimeMachine;
 use crate::errors::{self, FrameNote, ReportErrorExt};
 use crate::interpret::{
-    ErrorHandled, Frame, InterpErrorInfo, InterpErrorKind, MachineStopType, err_inval,
-    err_machine_stop,
+    CtfeProvenance, ErrorHandled, Frame, InterpErrorInfo, InterpErrorKind, MachineStopType,
+    Pointer, err_inval, err_machine_stop,
 };
 
 /// The CTFE machine has some custom error kinds.
@@ -22,8 +22,22 @@ pub enum ConstEvalErrKind {
     ModifiedGlobal,
     RecursiveStatic,
     AssertFailure(AssertKind<ConstInt>),
-    Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
+    Panic {
+        msg: Symbol,
+        line: u32,
+        col: u32,
+        file: Symbol,
+    },
     WriteThroughImmutablePointer,
+    /// Called `const_make_global` twice.
+    ConstMakeGlobalPtrAlreadyMadeGlobal(AllocId),
+    /// Called `const_make_global` on a non-heap pointer.
+    ConstMakeGlobalPtrIsNonHeap(Pointer<Option<CtfeProvenance>>),
+    /// Called `const_make_global` on a dangling pointer.
+    ConstMakeGlobalWithDanglingPtr(Pointer<Option<CtfeProvenance>>),
+    /// Called `const_make_global` on a pointer that does not start at the
+    /// beginning of an object.
+    ConstMakeGlobalWithOffset(Pointer<Option<CtfeProvenance>>),
 }
 
 impl MachineStopType for ConstEvalErrKind {
@@ -38,6 +52,12 @@ impl MachineStopType for ConstEvalErrKind {
             RecursiveStatic => const_eval_recursive_static,
             AssertFailure(x) => x.diagnostic_message(),
             WriteThroughImmutablePointer => const_eval_write_through_immutable_pointer,
+            ConstMakeGlobalPtrAlreadyMadeGlobal { .. } => {
+                const_eval_const_make_global_ptr_already_made_global
+            }
+            ConstMakeGlobalPtrIsNonHeap(_) => const_eval_const_make_global_ptr_is_non_heap,
+            ConstMakeGlobalWithDanglingPtr(_) => const_eval_const_make_global_with_dangling_ptr,
+            ConstMakeGlobalWithOffset(_) => const_eval_const_make_global_with_offset,
         }
     }
     fn add_args(self: Box<Self>, adder: &mut dyn FnMut(DiagArgName, DiagArgValue)) {
@@ -51,6 +71,14 @@ impl MachineStopType for ConstEvalErrKind {
             Panic { msg, .. } => {
                 adder("msg".into(), msg.into_diag_arg(&mut None));
             }
+            ConstMakeGlobalPtrIsNonHeap(ptr)
+            | ConstMakeGlobalWithOffset(ptr)
+            | ConstMakeGlobalWithDanglingPtr(ptr) => {
+                adder("ptr".into(), format!("{ptr:?}").into_diag_arg(&mut None));
+            }
+            ConstMakeGlobalPtrAlreadyMadeGlobal(alloc) => {
+                adder("alloc".into(), alloc.into_diag_arg(&mut None));
+            }
         }
     }
 }
diff --git a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
index 08e1877f0eb..ce72d59b8b0 100644
--- a/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
+++ b/compiler/rustc_const_eval/src/const_eval/eval_queries.rs
@@ -18,8 +18,8 @@ use tracing::{debug, instrument, trace};
 use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
 use crate::const_eval::CheckAlignment;
 use crate::interpret::{
-    CtfeValidationMode, GlobalId, Immediate, InternKind, InternResult, InterpCx, InterpErrorKind,
-    InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, StackPopCleanup, create_static_alloc,
+    CtfeValidationMode, GlobalId, Immediate, InternError, InternKind, InterpCx, InterpErrorKind,
+    InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, ReturnContinuation, create_static_alloc,
     intern_const_alloc_recursive, interp_ok, throw_exhaust,
 };
 use crate::{CTRL_C_RECEIVED, errors};
@@ -76,7 +76,7 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
         cid.instance,
         body,
         &ret.clone().into(),
-        StackPopCleanup::Root { cleanup: false },
+        ReturnContinuation::Stop { cleanup: false },
     )?;
     ecx.storage_live_for_always_live_locals()?;
@@ -93,25 +93,30 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
     // Since evaluation had no errors, validate the resulting constant.
     const_validate_mplace(ecx, &ret, cid)?;
 
-    // Only report this after validation, as validaiton produces much better diagnostics.
+    // Only report this after validation, as validation produces much better diagnostics.
     // FIXME: ensure validation always reports this and stop making interning care about it.
     match intern_result {
         Ok(()) => {}
-        Err(InternResult::FoundDanglingPointer) => {
+        Err(InternError::DanglingPointer) => {
             throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
                 ecx.tcx
                     .dcx()
                     .emit_err(errors::DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }),
             )));
         }
-        Err(InternResult::FoundBadMutablePointer) => {
+        Err(InternError::BadMutablePointer) => {
             throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
                 ecx.tcx
                     .dcx()
                     .emit_err(errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind }),
             )));
         }
+        Err(InternError::ConstAllocNotGlobal) => {
+            throw_inval!(AlreadyReported(ReportedErrorInfo::non_const_eval_error(
+                ecx.tcx.dcx().emit_err(errors::ConstHeapPtrInFinal { span: ecx.tcx.span }),
+            )));
+        }
     }
 
     interp_ok(R::make_result(ret, ecx))
diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs
index 76fa744361a..f24fb18f83b 100644
--- a/compiler/rustc_const_eval/src/const_eval/machine.rs
+++ b/compiler/rustc_const_eval/src/const_eval/machine.rs
@@ -169,13 +169,19 @@ pub type CompileTimeInterpCx<'tcx> = InterpCx<'tcx, CompileTimeMachine<'tcx>>;
 
 #[derive(Debug, PartialEq, Eq, Copy, Clone)]
 pub enum MemoryKind {
-    Heap,
+    Heap {
+        /// Indicates whether `make_global` was called on this allocation.
+        /// If this is `true`, the allocation must be immutable.
+        was_made_global: bool,
+    },
 }
 
 impl fmt::Display for MemoryKind {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         match self {
-            MemoryKind::Heap => write!(f, "heap allocation"),
+            MemoryKind::Heap { was_made_global } => {
+                write!(f, "heap allocation{}", if *was_made_global { " (made global)" } else { "" })
+            }
         }
     }
 }
@@ -184,7 +190,7 @@ impl interpret::MayLeak for MemoryKind {
     #[inline(always)]
     fn may_leak(self) -> bool {
         match self {
-            MemoryKind::Heap => false,
+            MemoryKind::Heap { was_made_global } => was_made_global,
         }
     }
 }
@@ -314,8 +320,6 @@ impl<'tcx> CompileTimeMachine<'tcx> {
 impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
     compile_time_machine!(<'tcx>);
 
-    type MemoryKind = MemoryKind;
-
     const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error
 
     #[inline(always)]
@@ -331,10 +335,10 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
     fn load_mir(
         ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceKind<'tcx>,
-    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
+    ) -> &'tcx mir::Body<'tcx> {
         match instance {
-            ty::InstanceKind::Item(def) => interp_ok(ecx.tcx.mir_for_ctfe(def)),
-            _ => interp_ok(ecx.tcx.instance_mir(instance)),
+            ty::InstanceKind::Item(def) => ecx.tcx.mir_for_ctfe(def),
+            _ => ecx.tcx.instance_mir(instance),
         }
     }
 
@@ -359,8 +363,8 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
         if let ty::InstanceKind::Item(def) = instance.def {
             // Execution might have wandered off into other crates, so we cannot do a stability-
             // sensitive check here. But we can at least rule out functions that are not const at
-            // all. That said, we have to allow calling functions inside a trait marked with
-            // #[const_trait]. These *are* const-checked!
+            // all. That said, we have to allow calling functions inside a `const trait`. These
+            // *are* const-checked!
             if !ecx.tcx.is_const_fn(def) || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check) {
                 // We certainly do *not* want to actually call the fn
                 // though, so be sure we return here.
@@ -420,7 +424,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                 let ptr = ecx.allocate_ptr(
                     Size::from_bytes(size),
                     align,
-                    interpret::MemoryKind::Machine(MemoryKind::Heap),
+                    interpret::MemoryKind::Machine(MemoryKind::Heap { was_made_global: false }),
                     AllocInit::Uninit,
                 )?;
                 ecx.write_pointer(ptr, dest)?;
@@ -453,10 +457,17 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                     ecx.deallocate_ptr(
                         ptr,
                         Some((size, align)),
-                        interpret::MemoryKind::Machine(MemoryKind::Heap),
+                        interpret::MemoryKind::Machine(MemoryKind::Heap { was_made_global: false }),
                     )?;
                 }
             }
+
+            sym::const_make_global => {
+                let ptr = ecx.read_pointer(&args[0])?;
+                ecx.make_const_heap_ptr_global(ptr)?;
+                ecx.write_pointer(ptr, dest)?;
+            }
+
             // The intrinsic represents whether the value is known to the optimizer (LLVM).
             // We're not doing any optimizations here, so there is no optimizer that could know the value.
             // (We know the value here in the machine of course, but this is the runtime of that code,
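For contrast with the error case above, a sketch of the pattern the new intrinsic arm supports. The surface signature of `const_make_global` is inferred from how the arm reads one pointer argument and writes a pointer result (assumed here to be `unsafe fn(*mut u8) -> *const u8`), and the feature gates are the same unstable ones as before:

```rust
#![feature(core_intrinsics, const_heap)]
use std::intrinsics;

const VALUE: &i32 = unsafe {
    // `const_allocate` hands back a mutable heap pointer, tracked as
    // MemoryKind::Heap { was_made_global: false }.
    let p = intrinsics::const_allocate(4, 4) as *mut i32;
    p.write(42);
    // `const_make_global` flips `was_made_global` and freezes the allocation,
    // so interning accepts it in the final value.
    &*(intrinsics::const_make_global(p as *mut u8) as *const i32)
};
```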
diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs
index 9133a5fc8ef..b6a64035261 100644
--- a/compiler/rustc_const_eval/src/errors.rs
+++ b/compiler/rustc_const_eval/src/errors.rs
@@ -44,6 +44,14 @@ pub(crate) struct MutablePtrInFinal {
 }
 
 #[derive(Diagnostic)]
+#[diag(const_eval_const_heap_ptr_in_final)]
+#[note]
+pub(crate) struct ConstHeapPtrInFinal {
+    #[primary_span]
+    pub span: Span,
+}
+
+#[derive(Diagnostic)]
 #[diag(const_eval_unstable_in_stable_exposed)]
 pub(crate) struct UnstableInStableExposed {
     pub gate: String,
@@ -475,6 +483,7 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
             WriteToReadOnly(_) => const_eval_write_to_read_only,
             DerefFunctionPointer(_) => const_eval_deref_function_pointer,
             DerefVTablePointer(_) => const_eval_deref_vtable_pointer,
+            DerefTypeIdPointer(_) => const_eval_deref_typeid_pointer,
             InvalidBool(_) => const_eval_invalid_bool,
             InvalidChar(_) => const_eval_invalid_char,
             InvalidTag(_) => const_eval_invalid_tag,
@@ -588,7 +597,10 @@ impl<'a> ReportErrorExt for UndefinedBehaviorInfo<'a> {
                 diag.arg("has", has.bytes());
                 diag.arg("msg", format!("{msg:?}"));
             }
-            WriteToReadOnly(alloc) | DerefFunctionPointer(alloc) | DerefVTablePointer(alloc) => {
+            WriteToReadOnly(alloc)
+            | DerefFunctionPointer(alloc)
+            | DerefVTablePointer(alloc)
+            | DerefTypeIdPointer(alloc) => {
                 diag.arg("allocation", alloc);
             }
             InvalidBool(b) => {
diff --git a/compiler/rustc_const_eval/src/interpret/call.rs b/compiler/rustc_const_eval/src/interpret/call.rs
index ebaa5a97a4a..1503f3bcd99 100644
--- a/compiler/rustc_const_eval/src/interpret/call.rs
+++ b/compiler/rustc_const_eval/src/interpret/call.rs
@@ -6,7 +6,7 @@ use std::borrow::Cow;
 use either::{Left, Right};
 use rustc_abi::{self as abi, ExternAbi, FieldIdx, Integer, VariantIdx};
 use rustc_hir::def_id::DefId;
-use rustc_middle::ty::layout::{FnAbiOf, IntegerExt, TyAndLayout};
+use rustc_middle::ty::layout::{IntegerExt, TyAndLayout};
 use rustc_middle::ty::{self, AdtDef, Instance, Ty, VariantDef};
 use rustc_middle::{bug, mir, span_bug};
 use rustc_span::sym;
@@ -15,7 +15,7 @@ use tracing::{info, instrument, trace};
 
 use super::{
     CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
-    Projectable, Provenance, ReturnAction, Scalar, StackPopCleanup, StackPopInfo, interp_ok,
+    Projectable, Provenance, ReturnAction, ReturnContinuation, Scalar, StackPopInfo, interp_ok,
     throw_ub, throw_ub_custom, throw_unsup_format,
 };
 use crate::fluent_generated as fluent;
@@ -340,7 +340,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         args: &[FnArg<'tcx, M::Provenance>],
         with_caller_location: bool,
         destination: &PlaceTy<'tcx, M::Provenance>,
-        mut stack_pop: StackPopCleanup,
+        mut cont: ReturnContinuation,
     ) -> InterpResult<'tcx> {
         // Compute callee information.
         // FIXME: for variadic support, do we have to somehow determine callee's extra_args?
@@ -365,15 +365,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
         if !callee_fn_abi.can_unwind {
             // The callee cannot unwind, so force the `Unreachable` unwind handling.
-            match &mut stack_pop {
-                StackPopCleanup::Root { .. } => {}
-                StackPopCleanup::Goto { unwind, .. } => {
+            match &mut cont {
+                ReturnContinuation::Stop { .. } => {}
+                ReturnContinuation::Goto { unwind, .. } => {
                     *unwind = mir::UnwindAction::Unreachable;
                 }
             }
         }
 
-        self.push_stack_frame_raw(instance, body, destination, stack_pop)?;
+        self.push_stack_frame_raw(instance, body, destination, cont)?;
 
         // If an error is raised here, pop the frame again to get an accurate backtrace.
         // To this end, we wrap it all in a `try` block.
@@ -617,7 +617,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                     &args,
                     with_caller_location,
                     destination,
-                    StackPopCleanup::Goto { ret: target, unwind },
+                    ReturnContinuation::Goto { ret: target, unwind },
                 )
             }
             // `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
@@ -755,8 +755,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`,
         // as the latter "executes" the goto to the return block, but we don't want to,
         // only the tail called function should return to the current return block.
-        let StackPopInfo { return_action, return_to_block, return_place } = self
-            .pop_stack_frame_raw(false, |_this, _return_place| {
+        let StackPopInfo { return_action, return_cont, return_place } =
+            self.pop_stack_frame_raw(false, |_this, _return_place| {
                 // This function's return value is just discarded, the tail-callee will fill in the return place instead.
                 interp_ok(())
             })?;
@@ -764,7 +764,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         assert_eq!(return_action, ReturnAction::Normal);
 
         // Take the "stack pop cleanup" info, and use that to initiate the next call.
-        let StackPopCleanup::Goto { ret, unwind } = return_to_block else {
+        let ReturnContinuation::Goto { ret, unwind } = return_cont else {
             bug!("can't tailcall as root");
         };
@@ -896,23 +896,23 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // Normal return, figure out where to jump.
         if unwinding {
             // Follow the unwind edge.
-            match stack_pop_info.return_to_block {
-                StackPopCleanup::Goto { unwind, .. } => {
+            match stack_pop_info.return_cont {
+                ReturnContinuation::Goto { unwind, .. } => {
                     // This must be the very last thing that happens, since it can in fact push a new stack frame.
                     self.unwind_to_block(unwind)
                 }
-                StackPopCleanup::Root { .. } => {
-                    panic!("encountered StackPopCleanup::Root when unwinding!")
+                ReturnContinuation::Stop { .. } => {
+                    panic!("encountered ReturnContinuation::Stop when unwinding!")
                 }
             }
         } else {
             // Follow the normal return edge.
-            match stack_pop_info.return_to_block {
-                StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
-                StackPopCleanup::Root { .. } => {
+            match stack_pop_info.return_cont {
+                ReturnContinuation::Goto { ret, .. } => self.return_to_block(ret),
+                ReturnContinuation::Stop { .. } => {
                     assert!(
                         self.stack().is_empty(),
-                        "only the bottommost frame can have StackPopCleanup::Root"
+                        "only the bottommost frame can have ReturnContinuation::Stop"
                     );
                     interp_ok(())
                 }
diff --git a/compiler/rustc_const_eval/src/interpret/eval_context.rs b/compiler/rustc_const_eval/src/interpret/eval_context.rs
index 46c784b41c6..11e7706fe60 100644
--- a/compiler/rustc_const_eval/src/interpret/eval_context.rs
+++ b/compiler/rustc_const_eval/src/interpret/eval_context.rs
@@ -7,8 +7,8 @@ use rustc_hir::def_id::DefId;
 use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
 use rustc_middle::query::TyCtxtAt;
 use rustc_middle::ty::layout::{
-    self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
-    TyAndLayout,
+    self, FnAbiError, FnAbiOf, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf,
+    LayoutOfHelpers, TyAndLayout,
 };
 use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeFoldable, TypingEnv, Variance};
 use rustc_middle::{mir, span_bug};
@@ -92,20 +92,6 @@ impl<'tcx, M: Machine<'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'tcx, M> {
     }
 }
 
-impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
-    /// This inherent method takes priority over the trait method with the same name in LayoutOf,
-    /// and allows wrapping the actual [LayoutOf::layout_of] with a tracing span.
-    /// See [LayoutOf::layout_of] for the original documentation.
-    #[inline]
-    pub fn layout_of(
-        &self,
-        ty: Ty<'tcx>,
-    ) -> <InterpCx<'tcx, M> as LayoutOfHelpers<'tcx>>::LayoutOfResult {
-        let _span = enter_trace_span!(M, "InterpCx::layout_of", "ty = {:?}", ty.kind());
-        LayoutOf::layout_of(self, ty)
-    }
-}
-
 impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> {
     type FnAbiOfResult = Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, InterpErrorKind<'tcx>>;
 
@@ -121,6 +107,43 @@ impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> {
     }
 }
 
+impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
+    /// This inherent method takes priority over the trait method with the same name in LayoutOf,
+    /// and allows wrapping the actual [LayoutOf::layout_of] with a tracing span.
+    /// See [LayoutOf::layout_of] for the original documentation.
+    #[inline(always)]
+    pub fn layout_of(&self, ty: Ty<'tcx>) -> <Self as LayoutOfHelpers<'tcx>>::LayoutOfResult {
+        let _span = enter_trace_span!(M, "InterpCx::layout_of", ty = ?ty.kind());
+        LayoutOf::layout_of(self, ty)
+    }
+
+    /// This inherent method takes priority over the trait method with the same name in FnAbiOf,
+    /// and allows wrapping the actual [FnAbiOf::fn_abi_of_fn_ptr] with a tracing span.
+    /// See [FnAbiOf::fn_abi_of_fn_ptr] for the original documentation.
+    #[inline(always)]
+    pub fn fn_abi_of_fn_ptr(
+        &self,
+        sig: ty::PolyFnSig<'tcx>,
+        extra_args: &'tcx ty::List<Ty<'tcx>>,
+    ) -> <Self as FnAbiOfHelpers<'tcx>>::FnAbiOfResult {
+        let _span = enter_trace_span!(M, "InterpCx::fn_abi_of_fn_ptr", ?sig, ?extra_args);
+        FnAbiOf::fn_abi_of_fn_ptr(self, sig, extra_args)
+    }
+
+    /// This inherent method takes priority over the trait method with the same name in FnAbiOf,
+    /// and allows wrapping the actual [FnAbiOf::fn_abi_of_instance] with a tracing span.
+    /// See [FnAbiOf::fn_abi_of_instance] for the original documentation.
+    #[inline(always)]
+    pub fn fn_abi_of_instance(
+        &self,
+        instance: ty::Instance<'tcx>,
+        extra_args: &'tcx ty::List<Ty<'tcx>>,
+    ) -> <Self as FnAbiOfHelpers<'tcx>>::FnAbiOfResult {
+        let _span = enter_trace_span!(M, "InterpCx::fn_abi_of_instance", ?instance, ?extra_args);
+        FnAbiOf::fn_abi_of_instance(self, instance, extra_args)
+    }
+}
+
 /// Test if it is valid for a MIR assignment to assign `src`-typed place to `dest`-typed value.
 /// This test should be symmetric, as it is primarily about layout compatibility.
 pub(super) fn mir_assign_valid_types<'tcx>(
@@ -272,7 +295,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let def = instance.def_id();
             &self.tcx.promoted_mir(def)[promoted]
         } else {
-            M::load_mir(self, instance)?
+            M::load_mir(self, instance)
         };
         // do not continue if typeck errors occurred (can only occur in local crate)
         if let Some(err) = body.tainted_by_errors {
diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs
index f0f958d069e..bb59b9f5418 100644
--- a/compiler/rustc_const_eval/src/interpret/intern.rs
+++ b/compiler/rustc_const_eval/src/interpret/intern.rs
@@ -26,21 +26,18 @@ use rustc_middle::ty::layout::TyAndLayout;
 use rustc_span::def_id::LocalDefId;
 use tracing::{instrument, trace};
 
-use super::{
-    AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, err_ub, interp_ok,
-};
-use crate::const_eval;
+use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, interp_ok};
 use crate::const_eval::DummyMachine;
-use crate::errors::NestedStaticInThreadLocal;
+use crate::{const_eval, errors};
 
-pub trait CompileTimeMachine<'tcx, T> = Machine<
+pub trait CompileTimeMachine<'tcx> = Machine<
         'tcx,
-        MemoryKind = T,
+        MemoryKind = const_eval::MemoryKind,
         Provenance = CtfeProvenance,
         ExtraFnVal = !,
         FrameExtra = (),
         AllocExtra = (),
-        MemoryMap = FxIndexMap<AllocId, (MemoryKind<T>, Allocation)>,
+        MemoryMap = FxIndexMap<AllocId, (MemoryKind<const_eval::MemoryKind>, Allocation)>,
     > + HasStaticRootDefId;
 
 pub trait HasStaticRootDefId {
@@ -62,18 +59,32 @@ impl HasStaticRootDefId for const_eval::CompileTimeMachine<'_> {
 /// already mutable (as a sanity check).
 ///
 /// Returns an iterator over all relocations referred to by this allocation.
-fn intern_shallow<'tcx, T, M: CompileTimeMachine<'tcx, T>>(
+fn intern_shallow<'tcx, M: CompileTimeMachine<'tcx>>(
     ecx: &mut InterpCx<'tcx, M>,
     alloc_id: AllocId,
     mutability: Mutability,
     disambiguator: Option<&mut DisambiguatorState>,
-) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, ()> {
+) -> Result<impl Iterator<Item = CtfeProvenance> + 'tcx, InternError> {
     trace!("intern_shallow {:?}", alloc_id);
     // remove allocation
     // FIXME(#120456) - is `swap_remove` correct?
-    let Some((_kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else {
-        return Err(());
+    let Some((kind, mut alloc)) = ecx.memory.alloc_map.swap_remove(&alloc_id) else {
+        return Err(InternError::DanglingPointer);
     };
+
+    match kind {
+        MemoryKind::Machine(const_eval::MemoryKind::Heap { was_made_global }) => {
+            if !was_made_global {
+                // Attempting to intern a `const_allocate`d pointer that was not made global via
+                // `const_make_global`. We want to error here, but we have to first put the
+                // allocation back into the `alloc_map` to keep things in a consistent state.
+                ecx.memory.alloc_map.insert(alloc_id, (kind, alloc));
+                return Err(InternError::ConstAllocNotGlobal);
+            }
+        }
+        MemoryKind::Stack | MemoryKind::CallerLocation => {}
+    }
+
     // Set allocation mutability as appropriate. This is used by LLVM to put things into
     // read-only memory, and also by Miri when evaluating other globals that
     // access this one.
@@ -99,7 +110,7 @@ fn intern_shallow<'tcx, T, M: CompileTimeMachine<'tcx, T>>(
     } else {
         ecx.tcx.set_alloc_id_memory(alloc_id, alloc);
     }
-    Ok(alloc.0.0.provenance().ptrs().iter().map(|&(_, prov)| prov))
+    Ok(alloc.inner().provenance().ptrs().iter().map(|&(_, prov)| prov))
 }
 
 /// Creates a new `DefId` and feeds all the right queries to make this `DefId`
@@ -125,7 +136,7 @@ fn intern_as_new_static<'tcx>(
     tcx.set_nested_alloc_id_static(alloc_id, feed.def_id());
 
     if tcx.is_thread_local_static(static_id.into()) {
-        tcx.dcx().emit_err(NestedStaticInThreadLocal { span: tcx.def_span(static_id) });
+        tcx.dcx().emit_err(errors::NestedStaticInThreadLocal { span: tcx.def_span(static_id) });
     }
 
     // These do not inherit the codegen attrs of the parent static allocation, since
@@ -151,9 +162,10 @@ pub enum InternKind {
 }
 
 #[derive(Debug)]
-pub enum InternResult {
-    FoundBadMutablePointer,
-    FoundDanglingPointer,
+pub enum InternError {
+    BadMutablePointer,
+    DanglingPointer,
+    ConstAllocNotGlobal,
 }
 
 /// Intern `ret` and everything it references.
@@ -163,11 +175,11 @@ pub enum InternResult {
 ///
 /// For `InternKind::Static` the root allocation will not be interned, but must be handled by the caller.
 #[instrument(level = "debug", skip(ecx))]
-pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval::MemoryKind>>(
+pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx>>(
     ecx: &mut InterpCx<'tcx, M>,
     intern_kind: InternKind,
     ret: &MPlaceTy<'tcx>,
-) -> Result<(), InternResult> {
+) -> Result<(), InternError> {
     let mut disambiguator = DisambiguatorState::new();
 
     // We are interning recursively, and for mutability we are distinguishing the "root" allocation
@@ -181,7 +193,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval
         }
         InternKind::Static(Mutability::Not) => {
             (
-                // Outermost allocation is mutable if `!Freeze`.
+                // Outermost allocation is mutable if `!Freeze` i.e. contains interior mutable types.
                 if ret.layout.ty.is_freeze(*ecx.tcx, ecx.typing_env) {
                     Mutability::Not
                 } else {
@@ -224,6 +236,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval
     // We want to first report "dangling" and then "mutable", so we need to delay reporting these
     // errors.
     let mut result = Ok(());
+    let mut found_bad_mutable_ptr = false;
 
     // Keep interning as long as there are things to intern.
     // We show errors if there are dangling pointers, or mutable pointers in immutable contexts
@@ -278,18 +291,7 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval
             // when there is memory there that someone might expect to be mutable, but we make it immutable.
             let dangling = !is_already_global && !ecx.memory.alloc_map.contains_key(&alloc_id);
             if !dangling {
-                // Found a mutable pointer inside a const where inner allocations should be
-                // immutable.
-                if !ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
-                    span_bug!(
-                        ecx.tcx.span,
-                        "the static const safety checks accepted a mutable pointer they should not have accepted"
-                    );
-                }
-                // Prefer dangling pointer errors over mutable pointer errors
-                if result.is_ok() {
-                    result = Err(InternResult::FoundBadMutablePointer);
-                }
+                found_bad_mutable_ptr = true;
             }
         }
         if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
@@ -310,18 +312,31 @@ pub fn intern_const_alloc_recursive<'tcx, M: CompileTimeMachine<'tcx, const_eval
         just_interned.insert(alloc_id);
         match intern_shallow(ecx, alloc_id, inner_mutability, Some(&mut disambiguator)) {
             Ok(nested) => todo.extend(nested),
-            Err(()) => {
-                ecx.tcx.dcx().delayed_bug("found dangling pointer during const interning");
-                result = Err(InternResult::FoundDanglingPointer);
+            Err(err) => {
+                ecx.tcx.dcx().delayed_bug("error during const interning");
+                result = Err(err);
             }
         }
     }
+    if found_bad_mutable_ptr && result.is_ok() {
+        // We found a mutable pointer inside a const where inner allocations should be immutable,
+        // and there was no other error. This should usually never happen! However, this can happen
+        // in unleash-miri mode, so report it as a normal error then.
+        if ecx.tcx.sess.opts.unstable_opts.unleash_the_miri_inside_of_you {
+            result = Err(InternError::BadMutablePointer);
+        } else {
+            span_bug!(
+                ecx.tcx.span,
+                "the static const safety checks accepted a mutable pointer they should not have accepted"
+            );
+        }
+    }
     result
 }
 
 /// Intern `ret`. This function assumes that `ret` references no other allocation.
 #[instrument(level = "debug", skip(ecx))]
-pub fn intern_const_alloc_for_constprop<'tcx, T, M: CompileTimeMachine<'tcx, T>>(
+pub fn intern_const_alloc_for_constprop<'tcx, M: CompileTimeMachine<'tcx>>(
     ecx: &mut InterpCx<'tcx, M>,
     alloc_id: AllocId,
 ) -> InterpResult<'tcx, ()> {
@@ -330,10 +345,7 @@ pub fn intern_const_alloc_for_constprop<'tcx, M: CompileTimeMachine<'tcx>>(
         return interp_ok(());
     }
     // Move allocation to `tcx`.
-    if let Some(_) = intern_shallow(ecx, alloc_id, Mutability::Not, None)
-        .map_err(|()| err_ub!(DeadLocal))?
-        .next()
-    {
+    if let Some(_) = intern_shallow(ecx, alloc_id, Mutability::Not, None).unwrap().next() {
         // We are not doing recursive interning, so we don't currently support provenance.
         // (If this assertion ever triggers, we should just implement a
        // proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index 378ed6d0e10..e24a355891d 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -4,8 +4,9 @@
 
 use std::assert_matches::assert_matches;
 
-use rustc_abi::Size;
+use rustc_abi::{FieldIdx, HasDataLayout, Size};
 use rustc_apfloat::ieee::{Double, Half, Quad, Single};
+use rustc_middle::mir::interpret::{read_target_uint, write_target_uint};
 use rustc_middle::mir::{self, BinOp, ConstValue, NonDivergingIntrinsic};
 use rustc_middle::ty::layout::TyAndLayout;
 use rustc_middle::ty::{Ty, TyCtxt};
@@ -28,8 +29,85 @@ pub(crate) fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ConstAll
     let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes(), ());
     tcx.mk_const_alloc(alloc)
 }
-
 impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
+    /// Generates a value of `TypeId` for `ty` in-place.
+    fn write_type_id(
+        &mut self,
+        ty: Ty<'tcx>,
+        dest: &PlaceTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, ()> {
+        let tcx = self.tcx;
+        let type_id_hash = tcx.type_id_hash(ty).as_u128();
+        let op = self.const_val_to_op(
+            ConstValue::Scalar(Scalar::from_u128(type_id_hash)),
+            tcx.types.u128,
+            None,
+        )?;
+        self.copy_op_allow_transmute(&op, dest)?;
+
+        // Give each pointer-sized chunk provenance that knows about the type id.
+        // Here we rely on `TypeId` being a newtype around an array of pointers, so we
+        // first project to its only field and then the array elements.
+        let alloc_id = tcx.reserve_and_set_type_id_alloc(ty);
+        let arr = self.project_field(dest, FieldIdx::ZERO)?;
+        let mut elem_iter = self.project_array_fields(&arr)?;
+        while let Some((_, elem)) = elem_iter.next(self)? {
+            // Decorate this part of the hash with provenance; leave the integer part unchanged.
+            let hash_fragment = self.read_scalar(&elem)?.to_target_usize(&tcx)?;
+            let ptr = Pointer::new(alloc_id.into(), Size::from_bytes(hash_fragment));
+            let ptr = self.global_root_pointer(ptr)?;
+            let val = Scalar::from_pointer(ptr, &tcx);
+            self.write_scalar(val, &elem)?;
+        }
+        interp_ok(())
+    }
+
+    /// Read a value of type `TypeId`, returning the type it represents.
+    pub(crate) fn read_type_id(
+        &self,
+        op: &OpTy<'tcx, M::Provenance>,
+    ) -> InterpResult<'tcx, Ty<'tcx>> {
+        // `TypeId` is a newtype around an array of pointers. All pointers must have the same
+        // provenance, and that provenance represents the type.
+        let ptr_size = self.pointer_size().bytes_usize();
+        let arr = self.project_field(op, FieldIdx::ZERO)?;
+
+        let mut ty_and_hash = None;
+        let mut elem_iter = self.project_array_fields(&arr)?;
+        while let Some((idx, elem)) = elem_iter.next(self)? {
+            let elem = self.read_pointer(&elem)?;
+            let (elem_ty, elem_hash) = self.get_ptr_type_id(elem)?;
+            // If this is the first element, remember the type and its hash.
+            // If this is not the first element, ensure it is consistent with the previous ones.
+            let full_hash = match ty_and_hash {
+                None => {
+                    let hash = self.tcx.type_id_hash(elem_ty).as_u128();
+                    let mut hash_bytes = [0u8; 16];
+                    write_target_uint(self.data_layout().endian, &mut hash_bytes, hash).unwrap();
+                    ty_and_hash = Some((elem_ty, hash_bytes));
+                    hash_bytes
+                }
+                Some((ty, hash_bytes)) => {
+                    if ty != elem_ty {
+                        throw_ub_format!(
+                            "invalid `TypeId` value: not all bytes carry the same type id metadata"
+                        );
+                    }
+                    hash_bytes
+                }
+            };
+            // Ensure the elem_hash matches the corresponding part of the full hash.
+            let hash_frag = &full_hash[(idx as usize) * ptr_size..][..ptr_size];
+            if read_target_uint(self.data_layout().endian, hash_frag).unwrap() != elem_hash.into() {
+                throw_ub_format!(
+                    "invalid `TypeId` value: the hash does not match the type id metadata"
+                );
+            }
+        }
+
+        interp_ok(ty_and_hash.unwrap().0)
+    }
+
     /// Returns `true` if emulation happened.
     /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
     /// intrinsic handling.
@@ -63,9 +141,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             sym::type_id => {
                 let tp_ty = instance.args.type_at(0);
                 ensure_monomorphic_enough(tcx, tp_ty)?;
-                let val = ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128());
-                let val = self.const_val_to_op(val, dest.layout.ty, Some(dest.layout))?;
-                self.copy_op(&val, dest)?;
+                self.write_type_id(tp_ty, dest)?;
+            }
+            sym::type_id_eq => {
+                let a_ty = self.read_type_id(&args[0])?;
+                let b_ty = self.read_type_id(&args[1])?;
+                self.write_scalar(Scalar::from_bool(a_ty == b_ty), dest)?;
             }
             sym::variant_count => {
                 let tp_ty = instance.args.type_at(0);
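With type ids now carrying provenance, equality bottoms out in the `type_id_eq` intrinsic above rather than a plain integer compare. A hedged sketch of what this enables at the language level — the `const_type_id` gate and the availability of `==` on `TypeId` in const context are assumptions about the accompanying library work, not something this diff shows:

```rust
#![feature(const_type_id)]
use std::any::TypeId;

// Hypothetical const comparison: `TypeId::of` is const-unstable, and `==`
// here would lower to the `type_id_eq` intrinsic, which compares the
// provenance-backed types rather than just the hash bytes.
const SAME: bool = {
    let a = TypeId::of::<u32>();
    let b = TypeId::of::<u32>();
    a == b
};
```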
diff --git a/compiler/rustc_const_eval/src/interpret/machine.rs b/compiler/rustc_const_eval/src/interpret/machine.rs
index 35ec303f961..e981f3973ae 100644
--- a/compiler/rustc_const_eval/src/interpret/machine.rs
+++ b/compiler/rustc_const_eval/src/interpret/machine.rs
@@ -18,8 +18,8 @@ use rustc_target::callconv::FnAbi;
 
 use super::{
     AllocBytes, AllocId, AllocKind, AllocRange, Allocation, CTFE_ALLOC_SALT, ConstAllocation,
-    CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind,
-    Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
+    CtfeProvenance, EnteredTraceSpan, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy,
+    MemoryKind, Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
 };
 
 /// Data returned by [`Machine::after_stack_pop`], and consumed by
@@ -147,12 +147,6 @@ pub trait Machine<'tcx>: Sized {
     /// already been checked before.
     const ALL_CONSTS_ARE_PRECHECKED: bool = true;
 
-    /// Determines whether rustc_const_eval functions that make use of the [Machine] should make
-    /// tracing calls (to the `tracing` library). By default this is `false`, meaning the tracing
-    /// calls will supposedly be optimized out. This flag is set to `true` inside Miri, to allow
-    /// tracing the interpretation steps, among other things.
-    const TRACING_ENABLED: bool = false;
-
     /// Whether memory accesses should be alignment-checked.
     fn enforce_alignment(ecx: &InterpCx<'tcx, Self>) -> bool;
 
@@ -189,8 +183,8 @@ pub trait Machine<'tcx>: Sized {
     fn load_mir(
         ecx: &InterpCx<'tcx, Self>,
         instance: ty::InstanceKind<'tcx>,
-    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
-        interp_ok(ecx.tcx.instance_mir(instance))
+    ) -> &'tcx mir::Body<'tcx> {
+        ecx.tcx.instance_mir(instance)
     }
 
     /// Entry point to all function calls.
@@ -634,6 +628,17 @@ pub trait Machine<'tcx>: Sized {
     /// Compute the value passed to the constructors of the `AllocBytes` type for
     /// abstract machine allocations.
     fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams;
+
+    /// Allows enabling/disabling tracing calls from within `rustc_const_eval` at compile time, by
+    /// delegating the entering of [tracing::Span]s to implementors of the [Machine] trait. The
+    /// default implementation corresponds to tracing being disabled, meaning the tracing calls will
+    /// supposedly be optimized out completely. To enable tracing, override this trait method and
+    /// return `span.entered()`. Also see [crate::enter_trace_span].
+    #[must_use]
+    #[inline(always)]
+    fn enter_trace_span(_span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
+        ()
+    }
 }
 
 /// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
@@ -644,6 +649,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
     type ExtraFnVal = !;
 
+    type MemoryKind = $crate::const_eval::MemoryKind;
     type MemoryMap =
         rustc_data_structures::fx::FxIndexMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
     const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory
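A minimal sketch of how an implementor such as Miri might opt into tracing via the new hook, following the doc comment above ("override this trait method and return `span.entered()`"); `MyMachine` is hypothetical and all other required trait items are elided:

```rust
impl<'tcx> Machine<'tcx> for MyMachine {
    // ... other associated types, consts, and methods elided ...

    #[inline(always)]
    fn enter_trace_span(span: impl FnOnce() -> tracing::Span) -> impl EnteredTraceSpan {
        // Construct the span lazily and enter it; the returned EnteredSpan
        // exits the span when it is dropped.
        span().entered()
    }
}
```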
diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs
index c97d53a45de..47228de5213 100644
--- a/compiler/rustc_const_eval/src/interpret/memory.rs
+++ b/compiler/rustc_const_eval/src/interpret/memory.rs
@@ -15,9 +15,9 @@ use std::{fmt, ptr};
 use rustc_abi::{Align, HasDataLayout, Size};
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::{FxHashSet, FxIndexMap};
-use rustc_middle::bug;
 use rustc_middle::mir::display_allocation;
 use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
+use rustc_middle::{bug, throw_ub_format};
 use tracing::{debug, instrument, trace};
 
 use super::{
@@ -26,6 +26,7 @@ use super::{
     Misalignment, Pointer, PointerArithmetic, Provenance, Scalar, alloc_range, err_ub,
     err_ub_custom, interp_ok, throw_ub, throw_ub_custom, throw_unsup, throw_unsup_format,
 };
+use crate::const_eval::ConstEvalErrKind;
 use crate::fluent_generated as fluent;
 
 #[derive(Debug, PartialEq, Copy, Clone)]
@@ -311,6 +312,51 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         interp_ok(new_ptr)
     }
 
+    /// Mark the `const_allocate`d allocation `ptr` points to as immutable so we can intern it.
+    pub fn make_const_heap_ptr_global(
+        &mut self,
+        ptr: Pointer<Option<CtfeProvenance>>,
+    ) -> InterpResult<'tcx>
+    where
+        M: Machine<'tcx, MemoryKind = crate::const_eval::MemoryKind, Provenance = CtfeProvenance>,
+    {
+        let (alloc_id, offset, _) = self.ptr_get_alloc_id(ptr, 0)?;
+        if offset.bytes() != 0 {
+            return Err(ConstEvalErrKind::ConstMakeGlobalWithOffset(ptr)).into();
+        }
+
+        if matches!(self.tcx.try_get_global_alloc(alloc_id), Some(_)) {
+            // This points to something outside the current interpreter.
+            return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
+        }
+
+        // If we can't find it in `alloc_map` it must be dangling (because we don't use
+        // `extra_fn_ptr_map` in const-eval).
+        let (kind, alloc) = self
+            .memory
+            .alloc_map
+            .get_mut_or(alloc_id, || Err(ConstEvalErrKind::ConstMakeGlobalWithDanglingPtr(ptr)))?;
+
+        // Ensure this is actually a *heap* allocation, and record it as made-global.
+        match kind {
+            MemoryKind::Stack | MemoryKind::CallerLocation => {
+                return Err(ConstEvalErrKind::ConstMakeGlobalPtrIsNonHeap(ptr)).into();
+            }
+            MemoryKind::Machine(crate::const_eval::MemoryKind::Heap { was_made_global }) => {
+                if *was_made_global {
+                    return Err(ConstEvalErrKind::ConstMakeGlobalPtrAlreadyMadeGlobal(alloc_id))
+                        .into();
+                }
+                *was_made_global = true;
+            }
+        }
+
+        // Prevent further mutation, this is now an immutable global.
+        alloc.mutability = Mutability::Not;
+
+        interp_ok(())
+    }
+
     #[instrument(skip(self), level = "debug")]
     pub fn deallocate_ptr(
         &mut self,
@@ -346,6 +392,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                         kind = "vtable",
                     )
                 }
+                Some(GlobalAlloc::TypeId { .. }) => {
+                    err_ub_custom!(
+                        fluent::const_eval_invalid_dealloc,
+                        alloc_id = alloc_id,
+                        kind = "typeid",
+                    )
+                }
                 Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
                     err_ub_custom!(
                         fluent::const_eval_invalid_dealloc,
@@ -615,6 +668,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             }
             Some(GlobalAlloc::Function { .. }) => throw_ub!(DerefFunctionPointer(id)),
             Some(GlobalAlloc::VTable(..)) => throw_ub!(DerefVTablePointer(id)),
+            Some(GlobalAlloc::TypeId { .. }) => throw_ub!(DerefTypeIdPointer(id)),
             None => throw_ub!(PointerUseAfterFree(id, CheckInAllocMsg::MemoryAccess)),
             Some(GlobalAlloc::Static(def_id)) => {
                 assert!(self.tcx.is_static(def_id));
@@ -882,7 +936,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         if let Some(fn_val) = self.get_fn_alloc(id) {
             let align = match fn_val {
                 FnVal::Instance(instance) => {
-                    self.tcx.codegen_fn_attrs(instance.def_id()).alignment.unwrap_or(Align::ONE)
+                    self.tcx.codegen_instance_attrs(instance.def).alignment.unwrap_or(Align::ONE)
                 }
                 // Machine-specific extra functions currently do not support alignment restrictions.
                 FnVal::Other(_) => Align::ONE,
@@ -896,7 +950,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             let (size, align) = global_alloc.size_and_align(*self.tcx, self.typing_env);
             let mutbl = global_alloc.mutability(*self.tcx, self.typing_env);
             let kind = match global_alloc {
-                GlobalAlloc::Static { .. } | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
+                GlobalAlloc::TypeId { .. }
+                | GlobalAlloc::Static { .. }
+                | GlobalAlloc::Memory { .. } => AllocKind::LiveData,
                 GlobalAlloc::Function { .. } => bug!("We already checked function pointers above"),
                 GlobalAlloc::VTable { .. } => AllocKind::VTable,
             };
@@ -936,6 +992,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         }
     }
 
+    /// Takes a pointer that is the first chunk of a `TypeId` and returns the type that its
+    /// provenance refers to, as well as the segment of the hash that this pointer covers.
+    pub fn get_ptr_type_id(
+        &self,
+        ptr: Pointer<Option<M::Provenance>>,
+    ) -> InterpResult<'tcx, (Ty<'tcx>, u64)> {
+        let (alloc_id, offset, _meta) = self.ptr_get_alloc_id(ptr, 0)?;
+        let GlobalAlloc::TypeId { ty } = self.tcx.global_alloc(alloc_id) else {
+            throw_ub_format!("invalid `TypeId` value: not all bytes carry type id metadata")
+        };
+        interp_ok((ty, offset.bytes()))
+    }
+
     pub fn get_ptr_fn(
         &self,
         ptr: Pointer<Option<M::Provenance>>,
@@ -1197,6 +1266,9 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for DumpAllocs<'a, 'tcx, M> {
                             Some(GlobalAlloc::VTable(ty, dyn_ty)) => {
                                 write!(fmt, " (vtable: impl {dyn_ty} for {ty})")?;
                             }
+                            Some(GlobalAlloc::TypeId { ty }) => {
+                                write!(fmt, " (typeid for {ty})")?;
+                            }
                             Some(GlobalAlloc::Static(did)) => {
                                 write!(fmt, " (static: {})", self.ecx.tcx.def_path_str(did))?;
                             }
@@ -1233,7 +1305,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
 
     /// `offset` is relative to this allocation reference, not the base of the allocation.
     pub fn write_ptr_sized(&mut self, offset: Size, val: Scalar<Prov>) -> InterpResult<'tcx> {
-        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size), val)
+        self.write_scalar(alloc_range(offset, self.tcx.data_layout().pointer_size()), val)
     }
 
     /// Mark the given sub-range (relative to this allocation reference) as uninitialized.
@@ -1285,7 +1357,7 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
     /// `offset` is relative to this allocation reference, not the base of the allocation.
     pub fn read_pointer(&self, offset: Size) -> InterpResult<'tcx, Scalar<Prov>> {
         self.read_scalar(
-            alloc_range(offset, self.tcx.data_layout().pointer_size),
+            alloc_range(offset, self.tcx.data_layout().pointer_size()),
             /*read_provenance*/ true,
         )
    }
@@ -1472,7 +1544,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             dest_alloc
                 .write_uninit(&tcx, dest_range)
                 .map_err(|e| e.to_interp_error(dest_alloc_id))?;
-            // We can forget about the provenance, this is all not initialized anyway.
+            // `write_uninit` also resets the provenance, so we are done.
             return interp_ok(());
         }
diff --git a/compiler/rustc_const_eval/src/interpret/mod.rs b/compiler/rustc_const_eval/src/interpret/mod.rs
index f8b3c92debb..2f365ec77b3 100644
--- a/compiler/rustc_const_eval/src/interpret/mod.rs
+++ b/compiler/rustc_const_eval/src/interpret/mod.rs
@@ -26,7 +26,7 @@ pub use self::call::FnArg;
 pub use self::eval_context::{InterpCx, format_interp_error};
 use self::eval_context::{from_known_layout, mir_assign_valid_types};
 pub use self::intern::{
-    HasStaticRootDefId, InternKind, InternResult, intern_const_alloc_for_constprop,
+    HasStaticRootDefId, InternError, InternKind, intern_const_alloc_for_constprop,
     intern_const_alloc_recursive,
 };
 pub use self::machine::{AllocMap, Machine, MayLeak, ReturnAction, compile_time_machine};
@@ -36,7 +36,8 @@ pub use self::operand::{ImmTy, Immediate, OpTy};
 pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
 use self::place::{MemPlace, Place};
 pub use self::projection::{OffsetMode, Projectable};
-pub use self::stack::{Frame, FrameInfo, LocalState, StackPopCleanup, StackPopInfo};
+pub use self::stack::{Frame, FrameInfo, LocalState, ReturnContinuation, StackPopInfo};
+pub use self::util::EnteredTraceSpan;
 pub(crate) use self::util::create_static_alloc;
 pub use self::validity::{CtfeValidationMode, RangeSet, RefTracking};
 pub use self::visitor::ValueVisitor;
diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs
index 306697d4ec9..f72c4418081 100644
--- a/compiler/rustc_const_eval/src/interpret/projection.rs
+++ b/compiler/rustc_const_eval/src/interpret/projection.rs
@@ -296,7 +296,11 @@ where
         base: &'a P,
     ) -> InterpResult<'tcx, ArrayIterator<'a, 'tcx, M::Provenance, P>> {
         let abi::FieldsShape::Array { stride, .. } = base.layout().fields else {
-            span_bug!(self.cur_span(), "project_array_fields: expected an array layout");
+            span_bug!(
+                self.cur_span(),
+                "project_array_fields: expected an array layout, got {:#?}",
+                base.layout()
+            );
         };
         let len = base.len(self)?;
         let field_layout = base.layout().field(self, 0);
diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs
index 543d68d7f45..2e99bb4209f 100644
--- a/compiler/rustc_const_eval/src/interpret/stack.rs
+++ b/compiler/rustc_const_eval/src/interpret/stack.rs
@@ -12,6 +12,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_middle::{bug, mir};
 use rustc_mir_dataflow::impls::always_storage_live_locals;
 use rustc_span::Span;
+use tracing::field::Empty;
 use tracing::{info_span, instrument, trace};
 
 use super::{
@@ -72,8 +73,8 @@ pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
     ////////////////////////////////////////////////////////////////////////////////
     // Return place and locals
     ////////////////////////////////////////////////////////////////////////////////
-    /// Work to perform when returning from this function.
-    return_to_block: StackPopCleanup,
+    /// Where to continue when returning from this function.
+    return_cont: ReturnContinuation,
 
     /// The location where the result of the current stack frame should be written to,
     /// and its layout in the caller. This place is to be interpreted relative to the
@@ -106,19 +107,19 @@ pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
     pub(super) loc: Either<mir::Location, Span>,
 }
 
+/// Where and how to continue when returning/unwinding from the current function.
 #[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
-pub enum StackPopCleanup {
+pub enum ReturnContinuation {
     /// Jump to the next block in the caller, or cause UB if None (that's a function
-    /// that may never return). Also store layout of return place so
-    /// we can validate it at that layout.
+    /// that may never return).
     /// `ret` stores the block we jump to on a normal return, while `unwind`
     /// stores the block used for cleanup during unwinding.
     Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
-    /// The root frame of the stack: nowhere else to jump to.
+    /// The root frame of the stack: nowhere else to jump to, so we stop.
     /// `cleanup` says whether locals are deallocated. Static computation
     /// wants them leaked to intern what they need (and just throw away
     /// the entire `ecx` when it is done).
-    Root { cleanup: bool },
+    Stop { cleanup: bool },
 }
 
 /// Return type of [`InterpCx::pop_stack_frame_raw`].
@@ -127,8 +128,8 @@ pub struct StackPopInfo<'tcx, Prov: Provenance> {
     /// stack frame.
     pub return_action: ReturnAction,
 
-    /// [`return_to_block`](Frame::return_to_block) of the popped stack frame.
-    pub return_to_block: StackPopCleanup,
+    /// [`return_cont`](Frame::return_cont) of the popped stack frame.
+    pub return_cont: ReturnContinuation,
 
     /// [`return_place`](Frame::return_place) of the popped stack frame.
     pub return_place: PlaceTy<'tcx, Prov>,
@@ -255,7 +256,7 @@ impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
         Frame {
             body: self.body,
             instance: self.instance,
-            return_to_block: self.return_to_block,
+            return_cont: self.return_cont,
             return_place: self.return_place,
             locals: self.locals,
             loc: self.loc,
@@ -350,20 +351,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
     /// the arguments or local variables.
     ///
     /// The high-level version of this is `init_stack_frame`.
-    #[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
+    #[instrument(skip(self, body, return_place, return_cont), level = "debug")]
     pub(crate) fn push_stack_frame_raw(
         &mut self,
         instance: ty::Instance<'tcx>,
         body: &'tcx mir::Body<'tcx>,
         return_place: &PlaceTy<'tcx, M::Provenance>,
-        return_to_block: StackPopCleanup,
+        return_cont: ReturnContinuation,
     ) -> InterpResult<'tcx> {
         trace!("body: {:#?}", body);
 
         // We can push a `Root` frame if and only if the stack is empty.
         debug_assert_eq!(
             self.stack().is_empty(),
-            matches!(return_to_block, StackPopCleanup::Root { .. })
+            matches!(return_cont, ReturnContinuation::Stop { .. })
         );
 
         // First push a stack frame so we have access to `instantiate_from_current_frame` and other
@@ -373,7 +374,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         let pre_frame = Frame {
             body,
             loc: Right(body.span), // Span used for errors caused during preamble.
-            return_to_block,
+            return_cont,
             return_place: return_place.clone(),
             locals,
             instance,
@@ -396,7 +397,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
 
         // Finish things up.
         M::after_stack_push(self)?;
         self.frame_mut().loc = Left(mir::Location::START);
-        let span = info_span!("frame", "{}", instance);
+        // `tracing_separate_thread` is used to instruct the chrome_tracing [tracing::Layer] in Miri
+        // to put the "frame" span on a separate trace thread/line than other spans, to make the
+        // visualization in https://ui.perfetto.dev easier to interpret. It is set to a value of
+        // [tracing::field::Empty] so that other tracing layers (e.g. the logger) will ignore it.
+        let span = info_span!("frame", tracing_separate_thread = Empty, "{}", instance);
         self.frame_mut().tracing_span.enter(span);
 
         interp_ok(())
@@ -429,15 +434,15 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             copy_ret_val(self, &frame.return_place)?;
         }
 
-        let return_to_block = frame.return_to_block;
+        let return_cont = frame.return_cont;
         let return_place = frame.return_place.clone();
 
         // Cleanup: deallocate locals.
         // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
         // We do this while the frame is still on the stack, so errors point to the callee.
-        let cleanup = match return_to_block {
-            StackPopCleanup::Goto { .. } => true,
-            StackPopCleanup::Root { cleanup, .. } => cleanup,
+        let cleanup = match return_cont {
+            ReturnContinuation::Goto { .. } => true,
+            ReturnContinuation::Stop { cleanup, .. } => cleanup,
         };
 
         let return_action = if cleanup {
@@ -455,7 +460,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             ReturnAction::NoCleanup
         };
 
-        interp_ok(StackPopInfo { return_action, return_to_block, return_place })
+        interp_ok(StackPopInfo { return_action, return_cont, return_place })
     }
 
     /// In the current stack frame, mark all locals as live that are not arguments and don't have
diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs
index 833fcc38817..629dcc3523c 100644
--- a/compiler/rustc_const_eval/src/interpret/step.rs
+++ b/compiler/rustc_const_eval/src/interpret/step.rs
@@ -5,7 +5,6 @@
 use either::Either;
 use rustc_abi::{FIRST_VARIANT, FieldIdx};
 use rustc_index::IndexSlice;
-use rustc_middle::ty::layout::FnAbiOf;
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::{bug, mir, span_bug};
 use rustc_span::source_map::Spanned;
diff --git a/compiler/rustc_const_eval/src/interpret/util.rs b/compiler/rustc_const_eval/src/interpret/util.rs
index 99add01f95c..72650d545c3 100644
--- a/compiler/rustc_const_eval/src/interpret/util.rs
+++ b/compiler/rustc_const_eval/src/interpret/util.rs
@@ -46,21 +46,20 @@ pub(crate) fn create_static_alloc<'tcx>(
     interp_ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
 }
 
-/// This struct is needed to enforce `#[must_use]` on [tracing::span::EnteredSpan]
-/// while wrapping them in an `Option`.
-#[must_use]
-pub enum MaybeEnteredSpan {
-    Some(tracing::span::EnteredSpan),
-    None,
-}
+/// A marker trait returned by [crate::interpret::Machine::enter_trace_span], identifying either a
+/// real [tracing::span::EnteredSpan] in case tracing is enabled, or the dummy type `()` when
+/// tracing is disabled.
+pub trait EnteredTraceSpan {}
+impl EnteredTraceSpan for () {}
+impl EnteredTraceSpan for tracing::span::EnteredSpan {}
 
+/// Shorthand for calling [crate::interpret::Machine::enter_trace_span] on a [tracing::info_span].
+/// This is supposed to be compiled out when [crate::interpret::Machine::enter_trace_span] has the
+/// default implementation (i.e. when it does not actually enter the span but instead returns `()`).
+/// Note: the result of this macro **must be used** because the span is exited when it's dropped.
 #[macro_export]
 macro_rules! enter_trace_span {
     ($machine:ident, $($tt:tt)*) => {
-        if $machine::TRACING_ENABLED {
-            $crate::interpret::util::MaybeEnteredSpan::Some(tracing::info_span!($($tt)*).entered())
-        } else {
-            $crate::interpret::util::MaybeEnteredSpan::None
-        }
+        $machine::enter_trace_span(|| tracing::info_span!($($tt)*))
    }
 }
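Call sites then go through the macro; this usage is taken directly from the `eval_context.rs` change above:

```rust
// Inside a method generic over M: Machine<'tcx>. With the default
// `enter_trace_span` this evaluates to `()` and should compile away entirely;
// with an override like Miri's it enters a real span until `_span` is dropped.
let _span = enter_trace_span!(M, "InterpCx::layout_of", ty = ?ty.kind());
```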
diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs
index fc4d13af8c4..62f591ceaa9 100644
--- a/compiler/rustc_const_eval/src/interpret/validity.rs
+++ b/compiler/rustc_const_eval/src/interpret/validity.rs
@@ -558,7 +558,15 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 {
                     // Everything should be already interned.
                     let Some(global_alloc) = self.ecx.tcx.try_get_global_alloc(alloc_id) else {
-                        assert!(self.ecx.memory.alloc_map.get(alloc_id).is_none());
+                        if self.ecx.memory.alloc_map.contains_key(&alloc_id) {
+                            // This can happen when interning didn't complete due to, e.g., a
+                            // missing `make_global`. This must mean other errors are already
+                            // being reported.
+                            self.ecx.tcx.dcx().delayed_bug(
+                                "interning did not complete, there should be an error",
+                            );
+                            return interp_ok(());
+                        }
                        // We can't have *any* references to non-existing allocations in const-eval
                         // as the rest of rustc isn't happy with them... so we throw an error, even
                         // though for zero-sized references this isn't really UB.
@@ -571,40 +579,42 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                     let alloc_actual_mutbl =
                         global_alloc.mutability(*self.ecx.tcx, self.ecx.typing_env);
 
-                    if let GlobalAlloc::Static(did) = global_alloc {
-                        let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
-                            bug!()
-                        };
-                        // Special handling for pointers to statics (irrespective of their type).
-                        assert!(!self.ecx.tcx.is_thread_local_static(did));
-                        assert!(self.ecx.tcx.is_static(did));
-                        // Mode-specific checks
-                        match ctfe_mode {
-                            CtfeValidationMode::Static { .. }
-                            | CtfeValidationMode::Promoted { .. } => {
-                                // We skip recursively checking other statics. These statics must be sound by
-                                // themselves, and the only way to get broken statics here is by using
-                                // unsafe code.
-                                // The reasons we don't check other statics is twofold. For one, in all
-                                // sound cases, the static was already validated on its own, and second, we
-                                // trigger cycle errors if we try to compute the value of the other static
-                                // and that static refers back to us (potentially through a promoted).
-                                // This could miss some UB, but that's fine.
-                                // We still walk nested allocations, as they are fundamentally part of this validation run.
-                                // This means we will also recurse into nested statics of *other*
-                                // statics, even though we do not recurse into other statics directly.
-                                // That's somewhat inconsistent but harmless.
-                                skip_recursive_check = !nested;
-                            }
-                            CtfeValidationMode::Const { .. } => {
-                                // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
-                                // just get errors trying to read the value.
-                                if alloc_actual_mutbl.is_mut() || self.ecx.tcx.is_foreign_item(did)
-                                {
-                                    skip_recursive_check = true;
+                    match global_alloc {
+                        GlobalAlloc::Static(did) => {
+                            let DefKind::Static { nested, .. } = self.ecx.tcx.def_kind(did) else {
+                                bug!()
+                            };
+                            assert!(!self.ecx.tcx.is_thread_local_static(did));
+                            assert!(self.ecx.tcx.is_static(did));
+                            match ctfe_mode {
+                                CtfeValidationMode::Static { .. }
+                                | CtfeValidationMode::Promoted { .. } => {
+                                    // We skip recursively checking other statics. These statics must be sound by
+                                    // themselves, and the only way to get broken statics here is by using
+                                    // unsafe code.
+                                    // The reasons we don't check other statics is twofold. For one, in all
+                                    // sound cases, the static was already validated on its own, and second, we
+                                    // trigger cycle errors if we try to compute the value of the other static
+                                    // and that static refers back to us (potentially through a promoted).
+                                    // This could miss some UB, but that's fine.
+                                    // We still walk nested allocations, as they are fundamentally part of this validation run.
+                                    // This means we will also recurse into nested statics of *other*
+                                    // statics, even though we do not recurse into other statics directly.
+                                    // That's somewhat inconsistent but harmless.
+                                    skip_recursive_check = !nested;
+                                }
+                                CtfeValidationMode::Const { .. } => {
+                                    // If this is mutable memory or an `extern static`, there's no point in checking it -- we'd
+                                    // just get errors trying to read the value.
+                                    if alloc_actual_mutbl.is_mut()
+                                        || self.ecx.tcx.is_foreign_item(did)
+                                    {
+                                        skip_recursive_check = true;
+                                    }
                                 }
                             }
                         }
+                        _ => (),
                     }
 
                     // If this allocation has size zero, there is no actual mutability here.
diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
index 4ca39bbc68e..b1f29598750 100644
--- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
+++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs
@@ -52,9 +52,9 @@ fn check_validity_requirement_strict<'tcx>(
 
     let mut cx = InterpCx::new(cx.tcx(), DUMMY_SP, cx.typing_env, machine);
 
-    let allocated = cx
-        .allocate(ty, MemoryKind::Machine(crate::const_eval::MemoryKind::Heap))
-        .expect("OOM: failed to allocate for uninit check");
+    // It doesn't really matter which `MemoryKind` we use here, `Stack` is the least wrong.
+    let allocated =
+        cx.allocate(ty, MemoryKind::Stack).expect("OOM: failed to allocate for uninit check");
 
     if kind == ValidityRequirement::Zero {
         cx.write_bytes_ptr(
@@ -184,9 +184,10 @@ pub(crate) fn validate_scalar_in_layout<'tcx>(
     let Ok(layout) = cx.layout_of(ty) else {
         bug!("could not compute layout of {scalar:?}:{ty:?}")
     };
+
+    // It doesn't really matter which `MemoryKind` we use here, `Stack` is the least wrong.
+    let allocated =
+        cx.allocate(layout, MemoryKind::Stack).expect("OOM: failed to allocate for uninit check");
 
     cx.write_scalar(scalar, &allocated).unwrap();